Dataset columns:

| column | type | values |
|---|---|---|
| code | string | lengths 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | lengths 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
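A minimal usage sketch (assuming this class ships in `transformers` as `ASTConfig`, matching the reconstruction above):

from transformers import ASTConfig

config = ASTConfig(patch_size=16, num_mel_bins=128)  # keyword values repeat the defaults above
assert config.model_type == "audio-spectrogram-transformer"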
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # How many encoder layers go between two sparse layers.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # Same spacing rule for the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
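A quick check of the sparse-step arithmetic above (a sketch, assuming the class is importable from `transformers` as `SwitchTransformersConfig`):

from transformers import SwitchTransformersConfig

config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
# every (num_layers // num_sparse_encoder_layers)-th encoder block gets a sparse MLP
assert config.encoder_sparse_step == 4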
---
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
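A small sketch of how the ONNX export config above is typically used (class names as reconstructed; the printed values follow from the two properties):

config = ViTConfig()
onnx_config = ViTOnnxConfig(config)
print(list(onnx_config.inputs))          # ['pixel_values']
print(onnx_config.atol_for_validation)   # 0.0001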
---
import random
def _partition(data: list, pivot) -> tuple:
    """
    Three-way partition of `data` into elements less than, equal to,
    and greater than the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at `index` if `items` were sorted."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
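A quick sanity check for the reconstruction above: quick_select(items, i) should agree with sorted(items)[i].

items = [2, 4, 5, 7, 899, 54, 32]
assert quick_select(items, 2) == sorted(items)[2]  # 5
assert quick_select(items, len(items) - 1) == 899
assert quick_select(items, len(items)) is None  # out-of-range index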
---
def partition(m: int) -> int:
    """Count the integer partitions of m with a dynamic-programming table."""
    memo: list[list[int]] = [[0 for _ in range(m + 1)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
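If the reconstruction above is faithful, memo[n][k] counts the partitions of n into parts of size at most k + 1, so partition(m) returns the total number of integer partitions of m. A hand-verified sanity check:

assert partition(3) == 3  # 3, 2+1, 1+1+1
assert partition(5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1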
---
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device))
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6)
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
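A hedged end-to-end sketch of what the fast tests exercise, reusing the dummy components defined above (CPU-only, tiny random models; the output shape assumes the dummy setup matches the upstream test):

tests = ControlNetImg2ImgPipelineFastTests()
pipe = StableDiffusionControlNetImg2ImgPipeline(**tests.get_dummy_components())
images = pipe(**tests.get_dummy_inputs("cpu")).images
print(images.shape)  # expected (1, 64, 64, 3)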
---
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def str2bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model.")
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
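A hedged usage sketch for the converter above (the script and checkpoint file names are hypothetical; the checkpoint name must contain "test", "imagenet64", or an LSUN-256 marker for the config lookups above to succeed):

# python convert_consistency_to_diffusers.py --unet_path cd_test_checkpoint.pt --dump_path ./converted --class_cond True
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained("./converted")  # the --dump_path used above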
---
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
---
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Checks that FeaturesManager.determine_framework picks the expected backend."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
---
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True)
                for d in range(num_layers)
            ]
        )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full([num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(additional_embeds, dim=1)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
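A minimal shape check for the prior above (tiny hypothetical dimensions; with the default "prd" token the model projects the last position to the CLIP embedding size):

model = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2, embedding_dim=8, num_embeddings=3, additional_embeddings=4)
out = model(hidden_states=torch.randn(1, 8), timestep=1, proj_embedding=torch.randn(1, 8), encoder_hidden_states=torch.randn(1, 3, 8))
print(out.predicted_image_embedding.shape)  # torch.Size([1, 8])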
---
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
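A short usage example for the linked stack above:

stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)
print(stack)  # 3->2->1
assert stack.peek() == 3
assert stack.pop() == 3 and len(stack) == 2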
---
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Check whether digit `n` can be placed at (row, column): it must not
    already appear in that row, that column, or the 3x3 subgrid.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of an empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid as nine rows of nine digits."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
---
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCAmelCase_ : int = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCAmelCase_ : str = typing.Union[np.floataa, int, float] # noqa: UP007
def _A (__a , __a ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(__a ) - np.asarray(__a )) ** 2 ) )
def _A (__a , __a ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(__a , __a ) ) ** (1 / 2)
if __name__ == "__main__":
def _A () -> None:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
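A worked check of the formula: for (1, 2, 3) and (4, 5, 6) the squared differences sum to 9 + 9 + 9 = 27, so both implementations should return sqrt(27), about 5.196:

assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12
assert abs(euclidean_distance_no_np([1, 2, 3], [4, 5, 6]) - 27 ** 0.5) < 1e-12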
---
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
lowercase = mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
lowercase = max(
mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , mf_knapsack(i - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , j - wt[i - 1] ) + val[i - 1] , )
lowercase = val
return f[i][j]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
lowercase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
lowercase = dp[i - 1][w_]
return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
def solution() -> int:
    """Project Euler 40: product of the digits of Champernowne's constant at
    positions 1, 10, 100, 1000, 10000, 100000 and 1000000."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
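
# Illustrative shapes after the reshapes above: k, q and v come out as
# (d_model, n_heads * head_dim) and o as (n_heads * head_dim, d_model); each is
# transposed once more when written into the PyTorch state dict below.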
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint into a flat PyTorch-style dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's parameters with those from the T5X checkpoint."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
)
# Number of symbols in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs in ``text`` (Rabin-Karp rolling hash)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
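
# Worked example of the rolling-hash update (illustrative): with base b and a
# two-character pattern, hashing "ab" gives h = (ord("a") * b + ord("b")) % m,
# and sliding one character to "bc" is
#   h' = ((h - ord("a") * modulus_power) * b + ord("c")) % m,
# which is exactly the update performed in the loop above.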
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
A_ = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """This __init__ is there for legacy code. When removing deprecated args
        completely, the class can simply be deleted."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
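
# Illustrative usage (hypothetical values; the ``models``/``batch_sizes``/
# ``sequence_lengths`` fields come from the parent BenchmarkArguments dataclass):
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   device, n_gpu = args._setup_devices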
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = WavaVecaPhonemeCTCTokenizer
lowercase__ = False
    def setUp(self):
        super().setUp()

        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_snake_case : List[str] = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : Union[str, Any] = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
    # overridden because phoneme tokens require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The output of `convert_tokens_to_string` is a dict for this tokenizer,
        # so check the "text" entry explicitly.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs by adding special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
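
    # Illustrative: the pair layout above mirrors BART/RoBERTa, e.g.
    # build_inputs_with_special_tokens([10, 11], [12]) -> [cls, 10, 11, sep, sep, 12, sep]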
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BARTPho does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of (sub-word) tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: n rows of left-padded stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n):
    """Print the full diamond, or a message if n is not positive."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
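
# Illustrative: pretty_print(3) prints roughly this shape (rows of stars widen,
# then shrink again):
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *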
if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/  \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : List[str] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : List[Any] ='''vivit'''
def __init__( self , _lowerCamelCase=2_2_4 , _lowerCamelCase=3_2 , _lowerCamelCase=[2, 1_6, 1_6] , _lowerCamelCase=3 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu_fast" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-06 , _lowerCamelCase=True , **_lowerCamelCase , ):
UpperCamelCase_: int = hidden_size
UpperCamelCase_: List[str] = num_hidden_layers
UpperCamelCase_: Union[str, Any] = num_attention_heads
UpperCamelCase_: Optional[int] = intermediate_size
UpperCamelCase_: int = hidden_act
UpperCamelCase_: Any = hidden_dropout_prob
UpperCamelCase_: int = attention_probs_dropout_prob
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: Optional[Any] = layer_norm_eps
UpperCamelCase_: Dict = image_size
UpperCamelCase_: Optional[int] = num_frames
UpperCamelCase_: Optional[Any] = tubelet_size
UpperCamelCase_: List[str] = num_channels
UpperCamelCase_: Optional[int] = qkv_bias
super().__init__(**_lowerCamelCase )
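
# Illustrative usage (hypothetical values):
#   config = VivitConfig(num_frames=16)
#   config.tubelet_size  -> [2, 16, 16]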
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
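
# Illustrative rename (hypothetical TF key): applying the PATTERNS above,
# "encoder/memory_attention/output_proj/kernel" becomes
# "encoder.encoder_attn.out_proj.weight".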
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""Project Euler 20: sum of the digits of the number 100!."""
from math import factorial


def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
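
# Example: solution(10) -> 27, since 10! = 3628800 and 3+6+2+8+8+0+0 = 27.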
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
                 cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        """Token type ids are 0 for the first sequence and 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
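# Minimal usage sketch (assuming the "google/fnet-base" checkpoint mapped above):
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     tokenizer("hello world")["input_ids"]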
def merge_sort(collection: list) -> list:
    """Sort `collection` with a pure-Python, generator-based merge sort."""

    def merge(left: list, right: list) -> list:
        def _merge():
            # Repeatedly yield the smaller head element, then drain whichever side remains.
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
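# Example: merge_sort([5, 3, 1, 4]) -> [1, 3, 4, 5]; empty and single-element lists pass through unchanged.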
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = input("Enter numbers separated by a comma:\n").strip()
_A = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 5_12}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        # Rebuild the backend normalizer if its stored options differ from the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Add [CLS]/[SEP] special tokens around one or two sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None) -> List[int]:
        """Token type ids are 0 for the first sequence and 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
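# Minimal usage sketch (assuming the "mobilebert-uncased" checkpoint mapped above):
#     tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     tokenizer("hello world")["input_ids"]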
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t):
        """Evaluate the Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t):
        """Return the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size=0.01):
        """Sample the curve at `step_size` intervals and plot it with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x_points, y_points, color="red", label="Control Points")
        plt.legend()
        plt.show()
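# Because the Bernstein basis weights are non-negative and sum to 1 at every t,
# each sampled point is a convex combination of the control points, so the curve
# always stays inside their convex hull.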
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Probability density of the normal distribution N(mu, sigma**2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
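# Example: at the mean of a standard normal, gaussian(0.0) == 1 / sqrt(2 * pi) ~= 0.39894.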
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        '''simple docstring'''
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"""

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        '''simple docstring'''
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 10_00, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        '''simple docstring'''
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 10_00, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        '''simple docstring'''
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 10_24), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1E-2)
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( Scene ):
    def construct( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("CPU" , font_size=24 )
_UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("GPU" , font_size=24 )
_UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
_UpperCAmelCase : int = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.4_6 / 4
_UpperCAmelCase : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip_vision_model"
    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1E-6, attention_dropout=0.0, initializer_range=1E-10, qkv_bias=True, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def _a ( cls , _a , **_a ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_a )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
SCREAMING_SNAKE_CASE__ : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class InstructBlipQFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip_qformer"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
def _a ( cls , _a , **_a ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_a )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = cls.get_config_dict(_a , **_a )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
SCREAMING_SNAKE_CASE__ : Optional[int] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class InstructBlipConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "instructblip"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs) -> List[Any]:
        """simple docstring"""
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends into the vision tower, so its encoder width is tied to it.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs, ) -> int:
        """Instantiate an InstructBlipConfig from its three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self) -> List[Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
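# Minimal composition sketch (default sub-configs; "opt" is the default text backbone):
#     config = InstructBlipConfig()
#     assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size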
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ) -> int:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
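        # With the defaults above: (16 - 2) // 2 + 1 = 8 frequency steps and
        # (24 - 2) // 2 + 1 = 12 time steps, i.e. 96 patches and 98 tokens in total.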
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_values, labels
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _a ( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ASTModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""input_values""": input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _a ( self , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ASTModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _a ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(_a )
SCREAMING_SNAKE_CASE__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Dict = ["""input_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _lowercase ( ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = torchaudio.load(__lowerCAmelCase )
return audio, sampling_rate
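# Note: the AST feature extractor used below defaults to a 16 kHz sampling rate,
# so the sampling_rate returned here is forwarded to it explicitly.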
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def _a ( self ) -> int:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.default_feature_extractor
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_audio()
SCREAMING_SNAKE_CASE__ : List[str] = audio.squeeze().numpy()
SCREAMING_SNAKE_CASE__ : List[str] = feature_extractor(_a , sampling_rate=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_a )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _a )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ['input_values', 'attention_mask']

    def __init__(self, feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=False, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", frame_signal_scale=1.0, fmin=80, fmax=7_600, mel_floor=1E-10, reduction_factor=2, return_attention_mask=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        # one-sided spectrum: a real FFT of length n_fft yields n_fft // 2 + 1 bins
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', )
        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', FutureWarning, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform, ):
        '''simple docstring'''
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', )
        return log_mel_spec.T
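    # With the defaults (win_length=64 ms at 16 kHz), sample_size is 1024, the
    # optimal FFT length is 1024, and n_freqs is 1024 // 2 + 1 = 513 one-sided bins.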
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ):
        '''simple docstring'''
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs['labels'] = inputs_target['input_values']
                decoder_attention_mask = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs, ):
        '''simple docstring'''
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs['input_values'] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs['input_values'] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        '''simple docstring'''
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]
        return output
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class FillMaskPipeline(Pipeline):
    '''simple docstring'''
    def get_masked_index(self, input_ids):
        '''simple docstring'''
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('Unsupported framework')
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        '''simple docstring'''
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, F"No mask_token ({self.tokenizer.mask_token}) found on the input", )

    def ensure_exactly_one_mask_token(self, model_inputs):
        '''simple docstring'''
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        '''simple docstring'''
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        '''simple docstring'''
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        '''simple docstring'''
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        F"The specified target token `{target}` does not exist in the model vocabulary. "
                        'We cannot replace it with anything meaningful, ignoring it')
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"The specified target token `{target}` does not exist in the model vocabulary. "
                    F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('At least one target must be provided when passed.')
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        '''simple docstring'''
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.')
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        '''simple docstring'''
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
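# Minimal usage sketch (assumption: the standard `transformers.pipeline` factory
# dispatches the "fill-mask" task to this class):
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="bert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=2)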
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall( datasets.Metric ):
    def _info( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ) -> int:
        '''simple docstring'''
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
"""simple docstring"""
from manim import *
class _A ( Scene ):
    def construct( self ):
"""simple docstring"""
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.2_5 , width=0.2_5 )
lowercase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""CPU""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""GPU""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Model""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
rect.set_stroke(__lowerCAmelCase )
lowercase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCAmelCase , buff=0.0 )
self.add(__lowerCAmelCase )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Loaded Checkpoint""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCAmelCase )
lowercase = []
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase = fill.copy().set_fill(__lowerCAmelCase , opacity=0.7 )
target.move_to(__lowerCAmelCase )
ckpt_arr.append(__lowerCAmelCase )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
lowercase = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase = Text("""Disk""" , font_size=24 )
lowercase = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , Write(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) )
lowercase = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(FadeOut(__lowerCAmelCase ) )
lowercase = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , *__lowerCAmelCase ) , )
self.wait()
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use Adafactor."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
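
# Hypothetical usage sketch (the argument values below are made up, not from
# the original file): these fields extend `TrainingArguments`, so they ride
# along with the usual trainer arguments.
#
# args = Seq2SeqTrainingArguments(
#     output_dir="outputs",
#     label_smoothing=0.1,
#     sortish_sampler=True,
#     predict_with_generate=True,
# )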
| 32
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
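
# Shape sketch (tile size and border are made-up values, not from the original
# file): the mask is fully opaque (255) in the interior and ramps linearly to
# 0 across the `overlap_pixels` border, so neighbouring tiles cross-fade when
# pasted.
# >>> m = make_transparency_mask((128, 128), 16)
# >>> m.shape, int(m[64, 64]), int(m[0, 0])
# ((128, 128), 255, 0)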
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
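
# Geometry sketch (coordinates are made up for illustration): a tile rectangle
# grows by its border on every side, clamped to the image bounds, so tiles at
# the image edge only overlap inward.
# >>> add_overlap_rect((0, 0, 128, 128), 32, (512, 512))
# (0, 0, 160, 160)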
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image,
                    prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
                    noise_level=noise_level, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    main()
| 137
|
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle = os.path.join(script_dir, "triangle.txt")

    with open(triangle) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
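
# Worked example of the bottom-up update on a hypothetical 3-row triangle
# (made-up numbers, not the real `triangle.txt` input):
#
#       3                3
#      7 4     ->      10  7      ->  max(12, 14, 13) = 14
#     2 4 6          12 14 13
#
# each entry absorbs the larger of its two parents, so the last row ends up
# holding the best path totals.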
if __name__ == "__main__":
    print(solution())
| 137
| 1
|
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
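
# Quick sanity sketch (the sample points are made up): three points on the
# line x = y = z are collinear; nudging the third point off the line breaks
# collinearity because the cross product of AB and AC is no longer zero.
# >>> are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
# True
# >>> are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))
# False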
| 356
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset

    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
save_to_disk(data, file_name=cache_file_name)
| 154
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the system (matrix | vector) by Gaussian elimination with partial
    pivoting and back substitution, returning the solution as a column matrix.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
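
# A tiny worked check of ``solve`` (the system is made up, verified by hand):
# 2x + y = 5 and x + 3y = 10 give x = 1, y = 3.
# >>> solve([[2, 1], [1, 3]], [[5], [10]])
# [[1.0], [3.0]]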
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Fit the minimal-degree polynomial through (1, y_1), ..., (n, y_n) and
    return it as a callable.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    print(f"{solution() = }")
| 152
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self : str ) -> str:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 322
| 0
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
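
# Hypothetical smoke test (the figures are quoted from the Project Euler 115
# problem statement, not computed here): for a minimum block length of 3 the
# fill-count function first exceeds one million at n = 30.
# >>> solution(3)
# 30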
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
    main()
| 50
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
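
# Sanity sketch (the counts are quoted from the Project Euler 116 statement):
# for a row of length five there are 7 red-only (length-2), 3 green-only
# (length-3) and 2 blue-only (length-4) tilings, so the sum over the three
# colours is 12.
# >>> solution(5)
# 12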
if __name__ == "__main__":
    print(f"{solution() = }")
| 246
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ : Dict = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    "Split a camelcased `identifier` into words."
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 246
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size

    return y
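
# A small usage sketch (the ODE below is a made-up example): Heun's method on
# y' = y from x = 0 to x = 1 with y(0) = 1 should approach e ≈ 2.71828 as the
# step size shrinks.
# >>> y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# >>> round(float(y[-1]), 2)
# 2.72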
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 6
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Encode question/passage pairs into reader inputs (see the docstring above)."""
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ):
        """Return the best answer spans across passages, ordered by descending relevance."""
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        """Find the top non-overlapping spans within one passage, by descending span score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            # keep only spans that do not overlap an already-chosen span
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DPRReaderTokenizer
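# Usage sketch for the reader tokenizer above (illustrative, not part of the
# original file; the DPRReader import and checkpoint name follow the usual
# transformers layout and are assumptions here):
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
#     model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
#     encoded_inputs = tokenizer(
#         questions=['What is love?'],
#         titles=['Haddaway'],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway."],
#         return_tensors='pt',
#     )
#     outputs = model(**encoded_inputs)
#     best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#     print(best_spans[0].text)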
| 6
| 1
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 32
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given Gregorian date (Doomsday algorithm)."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
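    # Worked example (added check; the date below was verified against a calendar):
    # for 24 Oct 2020 the year's doomsday falls on Saturday (6), October's
    # doomsday date is the 10th (10 % 7 == 3), and (6 + 24 - 3) % 7 == 6.
    assert get_week_day(2020, 10, 24) == "Saturday"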
| 32
| 1
|
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
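# Minimal usage sketch (an added illustration, not part of the original module):
# poll keys until Ctrl-C; arrow keys come back with ARROW_KEY_FLAG set.
if __name__ == "__main__":
    while True:
        pressed = get_character()
        if isinstance(pressed, str) and ord(pressed) == KEYMAP["interrupt"]:
            break
        print(repr(pressed))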
| 363
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
UpperCAmelCase__ : str = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
UpperCAmelCase__ : List[str] = Vector()
def snake_case__ ( self):
UpperCAmelCase__ : Any = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(_lowerCamelCase) , """(0,0,0,0,0,1)""")
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = Vector([1, 2, 3, 4])
self.assertEqual(len(_lowerCamelCase) , 4)
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = Vector([1, 2])
UpperCAmelCase__ : Optional[int] = Vector([1, 2, 3, 4, 5])
UpperCAmelCase__ : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
UpperCAmelCase__ : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3)
def snake_case__ ( self):
UpperCAmelCase__ : int = Vector([1, 2, 3])
UpperCAmelCase__ : Optional[Any] = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = Vector([1, 2, 3])
UpperCAmelCase__ : Dict = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = Vector([1, 2, 3])
UpperCAmelCase__ : Optional[int] = Vector([2, -1, 4]) # for test of dot product
UpperCAmelCase__ : Any = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , """(3.0,6.0,9.0)""")
self.assertEqual((a * b) , 0)
def snake_case__ ( self):
self.assertEqual(str(zero_vector(10)).count("""0""") , 10)
def snake_case__ ( self):
self.assertEqual(str(unit_basis_vector(3 , 1)) , """(0,1,0)""")
def snake_case__ ( self):
UpperCAmelCase__ : Any = Vector([1, 2, 3])
UpperCAmelCase__ : List[str] = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , _lowerCamelCase , _lowerCamelCase)) , """(3,4,7)""")
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = Vector([1, 0, 0, 0, 0, 0])
UpperCAmelCase__ : Optional[int] = x.copy()
self.assertEqual(str(_lowerCamelCase) , str(_lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : str = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(_lowerCamelCase) , """(0,1,0)""")
def snake_case__ ( self):
UpperCAmelCase__ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(_lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
UpperCAmelCase__ : Dict = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(_lowerCamelCase , _lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
UpperCAmelCase__ : Tuple = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(_lowerCamelCase , _lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def snake_case__ ( self):
UpperCAmelCase__ : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
UpperCAmelCase__ : List[Any] = Vector([1, 2, 3])
self.assertEqual("""(14,32,50)""" , str(a * x))
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2))
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(_lowerCamelCase))
def snake_case__ ( self):
UpperCAmelCase__ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)
def snake_case__ ( self):
UpperCAmelCase__ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
UpperCAmelCase__ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b))
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
UpperCAmelCase__ : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b))
def snake_case__ ( self):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 283
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
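# Note on the lazy-import pattern above (added commentary): _LazyModule defers
# the torch-backed submodule imports until an attribute is first accessed, e.g.
#     from transformers import WavLMConfig   # cheap, config only
#     from transformers import WavLMModel    # pulls in the torch modeling code lazily
# (the `transformers` import path is the usual location and an assumption here).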
| 161
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.'
    )
    config_args.add_argument(
        '--config_file',
        type=str,
        default=None,
        help='Path to the config file to use for accelerate.',
    )
    config_args.add_argument(
        '--tpu_name',
        default=None,
        help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.',
    )
    config_args.add_argument(
        '--tpu_zone',
        default=None,
        help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.',
    )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha',
        action='store_true',
        help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.',
    )
    pod_args.add_argument(
        '--command_file',
        default=None,
        help='The path to the file containing the commands to run on the pod on startup.',
    )
    pod_args.add_argument(
        '--command',
        action='append',
        nargs='+',
        help='A command to run on the pod. Can be passed multiple times.',
    )
    pod_args.add_argument(
        '--install_accelerate',
        action='store_true',
        help='Whether to install accelerate on the pod. Defaults to False.',
    )
    pod_args.add_argument(
        '--accelerate_version',
        default='latest',
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.'
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == 'dev':
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == 'latest':
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')
    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ['alpha']
    cmd += [
        'compute',
        'tpus',
        'tpu-vm',
        'ssh',
        args.tpu_name,
        '--zone',
        args.tpu_zone,
        '--command',
        args.command,
        '--worker',
        'all',
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
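# Example invocation (added illustration; the TPU name and zone are hypothetical):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
# With --debug, the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed.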
| 154
| 0
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Original GELU: 0.5 * x * (1 + erf(x / sqrt(2)))."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (as used in GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with the output clipped to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split x into halves a and b, return a * sigmoid(b)."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    'gelu': gelu,
    'gelu_10': gelu_10,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}')
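# Quick usage sketch (added illustration): activations are looked up by name.
if __name__ == "__main__":
    act = get_tf_activation('gelu_fast')
    print(act(tf.constant([-1.0, 0.0, 1.0])))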
| 350
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_A = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy'
        ).images
        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 167
| 0
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = 10
lowerCamelCase__ : int = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
lowerCamelCase__ : str = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(_UpperCAmelCase ) ),
} , features=_UpperCAmelCase , )
return dataset
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
lowerCamelCase__ : Dict = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=_UpperCAmelCase )
return filename
# FILE_CONTENT + files
_UpperCAmelCase : Optional[int] = """\
Text data.
Second line of data."""
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt'
lowerCamelCase__ : Optional[int] = FILE_CONTENT
with open(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
    import bz2
lowerCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
lowerCamelCase__ : Any = bytes(_UpperCAmelCase , 'utf-8' )
    with bz2.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
import gzip
lowerCamelCase__ : Any = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
lowerCamelCase__ : Optional[Any] = bytes(_UpperCAmelCase , 'utf-8' )
with gzip.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
lowerCamelCase__ : int = bytes(_UpperCAmelCase , 'utf-8' )
        with lz4.frame.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(_UpperCAmelCase , 'w' ) as archive:
archive.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
import tarfile
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(_UpperCAmelCase , 'w' ) as f:
f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
import lzma
lowerCamelCase__ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
lowerCamelCase__ : Optional[Any] = bytes(_UpperCAmelCase , 'utf-8' )
with lzma.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
import zipfile
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase__ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
lowerCamelCase__ : int = bytes(_UpperCAmelCase , 'utf-8' )
with zstd.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.xml'
lowerCamelCase__ : Optional[int] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase )
return filename
_UpperCAmelCase : Union[str, Any] = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
_UpperCAmelCase : List[Any] = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
_UpperCAmelCase : Optional[int] = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
_UpperCAmelCase : Any = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
_UpperCAmelCase : int = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : int = datasets.Dataset.from_dict(_UpperCAmelCase )
lowerCamelCase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(_UpperCAmelCase ) ) as con:
lowerCamelCase__ : List[str] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(_UpperCAmelCase , 'w' , newline='' ) as f:
lowerCamelCase__ : str = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(_UpperCAmelCase , 'w' , newline='' ) as f:
lowerCamelCase__ : str = csv.DictWriter(_UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
    import bz2
lowerCamelCase__ : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(_UpperCAmelCase , 'rb' ) as f:
lowerCamelCase__ : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(_UpperCAmelCase , 'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
lowerCamelCase__ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
lowerCamelCase__ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(_UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : str = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[Any]:
lowerCamelCase__ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
lowerCamelCase__ : List[str] = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(_UpperCAmelCase , 'wb' ) as f:
lowerCamelCase__ : Union[str, Any] = pq.ParquetWriter(_UpperCAmelCase , schema=_UpperCAmelCase )
lowerCamelCase__ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_UpperCAmelCase ) )] for k in DATA[0]} , schema=_UpperCAmelCase )
writer.write_table(_UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase__ : Tuple = {'data': DATA}
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
lowerCamelCase__ : int = {'data': DATA_DICT_OF_LISTS}
with open(_UpperCAmelCase , 'w' ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : str = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
import gzip
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
import gzip
lowerCamelCase__ : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
lowerCamelCase__ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase , 'w' ) as f:
f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
f.add(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase , 'w' ) as f:
f.add(_UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : int = ['0', '1', '2', '3']
lowerCamelCase__ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : Dict = ['0', '1', '2', '3']
lowerCamelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : List[Any] = ['0', '1', '2', '3']
lowerCamelCase__ : str = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(_UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : Optional[int] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
lowerCamelCase__ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
lowerCamelCase__ : int = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_UpperCAmelCase , 'w' ) as f:
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase , arcname=os.path.basename(_UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any:
lowerCamelCase__ : List[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 50
|
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate the minimum vertex cover of a graph via a maximal matching."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 50
| 1
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 352
|
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube. Rounding the float cube root before
    comparing avoids the float-equality pitfall of the naive check."""
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 319
| 0
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Heun's method (modified Euler): an Euler predictor step followed by a
    trapezoidal corrector step."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
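    # Worked example (added illustration): integrate y' = y with y(0) = 1 up to
    # x = 1; Heun's method with step 0.1 gives ~2.714, close to e ~ 2.71828.
    print(euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1])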
| 6
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6
| 1
|
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 103
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def UpperCamelCase ( snake_case__ : Accelerator , snake_case__ : int = 16 ) -> Dict:
UpperCamelCase : str = AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCamelCase : str = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase : Optional[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase : List[str] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase : str = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase : Dict = 8
else:
UpperCamelCase : Union[str, Any] = None
return tokenizer.pad(
snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCamelCase : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
UpperCamelCase : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
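# Added illustration (hedged, not part of the original example): LocalSGD lets
# each worker take `local_sgd_steps` optimizer steps on its local shard before
# parameters are synchronized, replacing the per-step gradient all-reduce with
# an occasional parameter average. A minimal sketch of that averaging step in
# plain torch.distributed; `average_model_params` is a hypothetical helper for
# illustration, not part of accelerate's API:
#
#     import torch.distributed as dist
#
#     def average_model_params(model, world_size):
#         for p in model.parameters():
#             dist.all_reduce(p.data, op=dist.ReduceOp.SUM)
#             p.data /= world_size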
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string (possibly non-minimal) to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as in_file:
        lines = in_file.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shortened = generate_roman_numerals(value)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
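# Added sanity check (illustrative, not part of the Project Euler solution):
# parsing a numeral and regenerating it round-trips through the minimal form,
# so non-minimal inputs shrink.
#
#     assert parse_roman_numerals("XIV") == 14
#     assert generate_roman_numerals(14) == "XIV"
#     assert generate_roman_numerals(parse_roman_numerals("IIIIIIIIII")) == "X"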
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_xy, max_xy):
    return (
        clamp(rect[0], min_xy[0], max_xy[0]),
        clamp(rect[1], min_xy[1], max_xy[1]),
        clamp(rect[2], min_xy[0], max_xy[0]),
        clamp(rect[3], min_xy[1], max_xy[1]),
    )
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    # Round `n` down to the nearest multiple of `d`. (Name reconstructed; the
    # helper is not referenced elsewhere in the visible part of this file.)
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
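# Added illustration (hedged, not part of the pipeline): __call__ walks a
# ceil-divided grid of tiles, so a 512x768 input at tile_size=128 is processed
# as math.ceil(512/128) * math.ceil(768/128) = 4 * 6 = 24 tiles, each upscaled
# 4x and blended back using the linear-ramp transparency mask built above.
#
#     def tile_grid(width, height, tile_size=128):
#         # Mirrors the tcx/tcy computation in __call__.
#         return math.ceil(width / tile_size), math.ceil(height / tile_size)
#
#     assert tile_grid(512, 768) == (4, 6)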
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
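# Added usage sketch (hedged): outside the test harness the pipeline under
# test can be exercised like the slow test above; "openai/shap-e" is the
# public checkpoint it loads.
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     images = pipe(
#         "a shark", guidance_scale=15.0, num_inference_steps=64,
#         frame_size=64, output_type="np",
#     ).images[0]  # -> (20, 64, 64, 3) array of rendered views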
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting.
        - In legacy mode: no prefix, suffix = [eos, src_lang_code].
        - In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language setting.
        - In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        - In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
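# Added usage sketch (hedged): the language-code plumbing above is what makes
# translation work end to end. The checkpoint id and codes below are the
# public NLLB-200 ones, but treat the exact token layout as an assumption:
#
#     tok = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     enc = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#     # In the default (non-legacy) mode, enc.input_ids starts with the
#     # eng_Latn code and ends with EOS.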
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
                tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
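# Added note (hedged, mirrors test_legacy_behaviour above): with
# legacy_behaviour=True the source language code is appended after EOS
# ([..., 2, 256047]), while the default mode places it first
# ([256047, ..., 2]); the two assertions above encode exactly that contract.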
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase_ ( _UpperCAmelCase = "" ):
"""simple docstring"""
A_ : Optional[int] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
A_ : str = BeautifulSoup(requests.get(_UpperCAmelCase ).text , '''html.parser''' )
A_ : List[Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
A_ : List[str] = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_UpperCAmelCase , _UpperCAmelCase )
}
def lowercase_ ( _UpperCAmelCase = "IMDb_Top_250_Movies.csv" ):
"""simple docstring"""
A_ : Any = get_imdb_top_aaa_movies()
with open(_UpperCAmelCase , '''w''' , newline='''''' ) as out_file:
A_ : List[Any] = csv.writer(_UpperCAmelCase )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
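# Added usage sketch (hedged): calling the scraper directly returns a
# {title: rating} mapping; the CSS classes above are assumptions about the
# current IMDb markup and may break if the page layout changes.
#
#     movies = get_imdb_top_250_movies()
#     print(list(movies.items())[:3])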
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
snake_case_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
snake_case_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
snake_case_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
snake_case_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
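# Added worked check (illustrative, not part of the metric): the estimator is
# the unbiased pass@k from the Codex paper, pass@k = 1 - C(n - c, k) / C(n, k)
# for n sampled candidates of which c passed; for k=1 it reduces to c/n.
#
#     >>> estimate_pass_at_k(np.array([5]), np.array([2]), 1)
#     array([0.4])  # 1 - (3/4) * (4/5) = 2/5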
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
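# Added note (illustrative): the _LazyModule registered above defers the heavy
# torch imports until an attribute is first accessed, so e.g.
#
#     from transformers import MvpForConditionalGeneration
#
# only loads `modeling_mvp` at that point, keeping `import transformers` fast.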
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
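# Added note (hedged): `do_reduce_labels` targets ADE20k-style maps where 0 is
# the background class; when enabled, the processor shifts every label down by
# one and maps the old background 0 to the ignore index 255, e.g. a map
# [[0, 1], [2, 0]] becomes [[255, 0], [1, 255]]. Treat the exact index values
# as an assumption about the current implementation.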
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
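# The tests above call a `prepare_image_inputs` helper that is defined elsewhere
# in the test suite. As a reference, a minimal sketch consistent with how it is
# called could look like the following. The tester attributes used here
# (batch_size, num_channels, min_resolution, max_resolution) are assumptions
# about the tester object, not a documented API; numpy, torch and PIL are
# already imported by this module since the tests use them.
def prepare_image_inputs(tester, equal_resolution=False, numpify=False, torchify=False):
    """Create a small batch of random images as PIL images, numpy arrays, or torch tensors."""
    assert not (numpify and torchify), "You cannot ask for both numpy and torch tensors"
    images = []
    for _ in range(tester.batch_size):
        if equal_resolution:
            height = width = tester.max_resolution
        else:
            height = np.random.randint(tester.min_resolution, tester.max_resolution)
            width = np.random.randint(tester.min_resolution, tester.max_resolution)
        images.append(np.random.randint(0, 256, (tester.num_channels, height, width), dtype=np.uint8))
    if torchify:
        return [torch.from_numpy(img) for img in images]
    if numpify:
        return images
    # default: channels-last PIL images
    return [Image.fromarray(np.moveaxis(img, 0, -1)) for img in images]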
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
'''simple docstring'''
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        '''simple docstring'''
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        '''simple docstring'''
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        '''simple docstring'''
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
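# A minimal way to exercise this pipeline end to end (the checkpoint name is
# illustrative; any object-detection model on the Hub works, and DETR
# additionally needs the `timm` package installed):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
        # each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])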
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
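    # Worked example (the classic LeetCode 983 instance): a 1-day pass on day 1,
    # a 7-day pass covering days 4-10, and a 1-day pass on day 20 cost 2 + 7 + 2 = 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11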
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
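# For reference, a minimal implementation of `truncate_or_pad` that satisfies
# the three block-size tests above (a sketch; the canonical version lives in
# utils_summarization, hence the underscore to avoid shadowing the import):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    """Return exactly block_size items: truncate long sequences, pad short ones."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))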
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
        [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `"train"`):
Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `"compressed"`):
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
    index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
    output_retrieved (`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None,
                 pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ",
                 doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768,
                 retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed",
                 index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False,
                 label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False,
                 output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
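# Usage sketch: composing a RagConfig from two sub-configurations. The checkpoint
# names are illustrative; any question-encoder/generator pair works.
if __name__ == "__main__":
    from transformers import AutoConfig

    question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    generator_config = AutoConfig.from_pretrained("facebook/bart-large")
    rag_config = RagConfig.from_question_encoder_generator_configs(
        question_encoder_config, generator_config, n_docs=5, index_name="exact"
    )
    print(rag_config.n_docs, rag_config.generator.model_type)  # 5 bart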
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
"""simple docstring"""
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
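# This nightly test hard-requires the CUDA execution provider; before running it,
# the installed onnxruntime build can be checked with the plain onnxruntime API:
if __name__ == "__main__":
    import onnxruntime

    print(onnxruntime.get_available_providers())
    # e.g. ['CUDAExecutionProvider', 'CPUExecutionProvider'] on a CUDA build,
    # or just ['CPUExecutionProvider'] on a CPU-only install.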
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True,
                 use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        '''simple docstring'''
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_openai_gpt(self):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
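# For reference, the same checkpoint can be used for free-form generation outside
# of the test harness (a minimal sketch; greedy decoding as in the slow test):
if __name__ == "__main__":
    from transformers import OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    inputs = tokenizer("the president is", return_tensors="pt")
    output_ids = model.generate(inputs["input_ids"], do_sample=False, max_length=20)
    print(tokenizer.decode(output_ids[0]))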
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512,
                 pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        '''simple docstring'''
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None,
                head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None,
                output_attentions=None, return_dict=None, output_hidden_states=None):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
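# Usage sketch with a deliberately tiny, illustrative config (these sizes are
# arbitrary and not a released checkpoint):
if __name__ == "__main__":
    config = RobertaSeriesConfig(
        vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=128, project_dim=32
    )
    model = RobertaSeriesModelWithTransformation(config)
    input_ids = torch.randint(0, 1000, (1, 8))
    out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
    print(out.projection_state.shape)  # torch.Size([1, 8, 32])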
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """simple docstring"""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
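# Hand-checked example: "HELLO" shifted up by 3 gives "KHOOR", so brute-forcing
# "KHOOR" recovers the plaintext at key #3:
#
#   >>> decrypt("KHOOR")        # doctest: +SKIP
#   Decryption using Key #0: KHOOR
#   ...
#   Decryption using Key #3: HELLO
#   ...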
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
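# Usage sketch: a down-sized LUKE configuration for quick experimentation
# (the values are illustrative, not a released checkpoint):
if __name__ == "__main__":
    config = LukeConfig(
        hidden_size=256, num_hidden_layers=4, num_attention_heads=4,
        intermediate_size=512, entity_vocab_size=10000
    )
    print(config.model_type, config.hidden_size, config.entity_emb_size)  # luke 256 256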
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for the transformer."""

    def __init__(self, class_size, embed_size):
        """simple docstring"""
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        """simple docstring"""
        logits = self.mlp(hidden_state)
        return logits
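# Usage sketch: score a batch of pooled hidden states with the head above
# (sizes are illustrative):
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(4, 768)  # batch of 4 pooled transformer states
    print(head(hidden_state).shape)  # torch.Size([4, 5])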
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
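# Usage sketch with the published camembert-base checkpoint referenced above
# (requires the sentencepiece package):
if __name__ == "__main__":
    from transformers import CamembertTokenizer

    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    encoded = tokenizer("J'aime le camembert !")
    print(encoded["input_ids"])
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))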
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    """simple docstring"""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
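# Usage sketch with the LibriSpeech checkpoint referenced in this file:
if __name__ == "__main__":
    from transformers import Speech2TextTokenizer

    tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
    ids = tokenizer("hello world").input_ids
    print(ids, tokenizer.decode(ids, skip_special_tokens=True))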
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """simple docstring"""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """simple docstring"""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """simple docstring"""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    """simple docstring"""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
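# A quick worked example of the token-level F1 above (normalize_answer strips
# articles): gold "the cat sat" becomes ["cat", "sat"], prediction
# "cat sat down" becomes ["cat", "sat", "down"]; 2 overlapping tokens give
# precision 2/3, recall 1.0, F1 = 0.8.
#
#   >>> compute_f1("the cat sat", "cat sat down")
#   0.8
#   >>> compute_f1("", "")   # two no-answers agree
#   1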
def get_raw_scores(dataset, preds):
    """simple docstring"""
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """simple docstring"""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    """simple docstring"""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """simple docstring"""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """simple docstring"""
    # `plt` is imported lazily elsewhere in this script when --out-image-dir is set
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """simple docstring"""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}

def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")

def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
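
# Added note on the sweep above: questions are visited in increasing order of
# no-answer probability, so at each step everything seen so far counts as
# answered and the rest as no-answer. For example, with scores {"a": 1, "b": 0},
# na_probs {"a": 0.2, "b": 0.6}, qid_to_has_ans {"a": True, "b": False} and
# preds {"a": "x", "b": ""}: cur_score starts at 1 (one no-answer question),
# visiting "a" adds 1 (new best at threshold 0.2), visiting "b" adds 0,
# so the function returns (100.0, 0.2).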

def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh

def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))

if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_a = ['pixel_values']
def __init__( self : Tuple , lowerCAmelCase : bool = True , lowerCAmelCase : int = 32 , lowerCAmelCase : List[Any]=PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , **lowerCAmelCase : Optional[Any] , ):
lowerCAmelCase = do_resize
lowerCAmelCase = do_rescale
lowerCAmelCase = size_divisor
lowerCAmelCase = resample
super().__init__(**SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[ChannelDimension] = None , **lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = get_image_size(SCREAMING_SNAKE_CASE_ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase = height // size_divisor * size_divisor
lowerCAmelCase = width // size_divisor * size_divisor
lowerCAmelCase = resize(SCREAMING_SNAKE_CASE_ , (new_h, new_w) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return image
def __lowercase ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : float , lowerCAmelCase : Optional[ChannelDimension] = None , **lowerCAmelCase : Union[str, Any] ):
return rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : str , lowerCAmelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[TensorType, str]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ):
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
lowerCAmelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for img in images]
if do_resize:
lowerCAmelCase = [self.resize(SCREAMING_SNAKE_CASE_ , size_divisor=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(SCREAMING_SNAKE_CASE_ , scale=1 / 255 ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
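
# Added usage sketch; illustrative only, since the class name above is
# reconstructed from context:
#
#   import numpy as np
#   processor = GLPNImageProcessor(size_divisor=32)
#   image = np.random.randint(0, 256, (500, 650, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   # Height and width are floored to multiples of 32, so 500x650 -> 480x640.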
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements to be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
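
# Added usage sketch; illustrative only. It assumes the usual PipelineTool
# calling convention, where __call__ chains encode -> forward -> decode:
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(Image.open("photo.png"), "cat")
#   mask.save("cat_mask.png")  # binary mask as a PIL image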
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : int , _lowercase : Any , _lowercase : List[str]=7 , _lowercase : Dict=3 , _lowercase : Dict=10 , _lowercase : Union[str, Any]=18 , _lowercase : Dict=30 , _lowercase : List[str]=4_00 , _lowercase : Tuple=True , _lowercase : int=None , _lowercase : Union[str, Any]=True , _lowercase : Union[str, Any]=[0.5, 0.5, 0.5] , _lowercase : List[Any]=[0.5, 0.5, 0.5] , _lowercase : List[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""shortest_edge""": 18}
SCREAMING_SNAKE_CASE__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_frames
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = crop_size
def __a ( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = VivitImageProcessor if is_vision_available() else None
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = VivitImageProcessingTester(self )
@property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
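
# Added note: for the sample graph above this prints 5. Distances count
# vertices (every node starts at 1), so the longest chain is the five-vertex
# path 0 -> 2 -> 5 -> 6 -> 7.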
"""simple docstring"""
from typing import Any
class _A :
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : Any):
a : Optional[Any] = data
a : Optional[int] = None
def __repr__( self : str):
return f'''Node({self.data})'''
class _A :
"""simple docstring"""
def __init__( self : str):
a : Dict = None
def __iter__( self : int):
a : str = self.head
while node:
yield node.data
a : Union[str, Any] = node.next
def __len__( self : Any):
return sum(1 for _ in self)
def __repr__( self : Any):
return "->".join([str(__UpperCAmelCase) for item in self])
def __getitem__( self : int , __UpperCAmelCase : int):
if not 0 <= index < len(self):
raise ValueError("list index out of range.")
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any):
if not 0 <= index < len(self):
raise ValueError("list index out of range.")
a : List[Any] = self.head
for _ in range(__UpperCAmelCase):
a : Dict = current.next
a : Optional[Any] = data
def __snake_case ( self : Any , __UpperCAmelCase : Any):
self.insert_nth(len(self) , __UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : Any):
self.insert_nth(0 , __UpperCAmelCase)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any):
if not 0 <= index <= len(self):
raise IndexError("list index out of range")
a : Any = Node(__UpperCAmelCase)
if self.head is None:
a : int = new_node
elif index == 0:
a : Dict = self.head # link new_node to head
a : Union[str, Any] = new_node
else:
a : List[str] = self.head
for _ in range(index - 1):
a : int = temp.next
a : Optional[int] = temp.next
a : int = new_node
def __snake_case ( self : int): # print every node data
print(self)
def __snake_case ( self : Optional[Any]):
return self.delete_nth(0)
def __snake_case ( self : Any): # delete from tail
return self.delete_nth(len(self) - 1)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : int = 0):
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError("List index out of range.")
a : Optional[Any] = self.head # default first node
if index == 0:
a : int = self.head.next
else:
a : List[str] = self.head
for _ in range(index - 1):
a : int = temp.next
a : Any = temp.next
a : str = temp.next.next
return delete_node.data
def __snake_case ( self : int):
return self.head is None
def __snake_case ( self : List[Any]):
a : str = None
a : Tuple = self.head
while current:
# Store the current node's next node.
a : Any = current.next
# Make the current node's next point backwards
a : Any = prev
# Make the previous node be the current node
a : Optional[int] = current
# Make the current node the next node (to progress iteration)
a : int = next_node
# Return prev in order to put the head at the end
a : Dict = prev
def lowercase ( )-> None:
'''simple docstring'''
a : Any = LinkedList()
assert linked_list.is_empty() is True
assert str(A_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(A_ ) == i
linked_list.insert_nth(A_ , i + 1 )
assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(A_ ) == "->".join(str(A_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(A_ ) == 9
assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
a : Optional[int] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(A_ ) == "->".join(str(A_ ) for i in range(-8 , 1 ) )
def lowercase ( )-> None:
'''simple docstring'''
a : str = [
-9,
100,
Node(77_345_112 ),
"dlrow olleH",
7,
5_555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
a : str = LinkedList()
for i in test_input:
linked_list.insert_tail(A_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(A_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
a : int = linked_list.delete_head()
assert result == -9
assert (
str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
a : int = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
a : Union[str, Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(A_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(A_ )
assert (
str(A_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(A_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase ( )-> Optional[int]:
'''simple docstring'''
from doctest import testmod
testmod()
a : int = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(A_ )
print("\nReading/changing Node data using indexing:" )
print(F'''Element at Position 1: {linked_list[1]}''' )
a : Tuple = input("Enter New Value: " ).strip()
print("New list:" )
print(A_ )
print(F'''length of linked_list is : {len(A_ )}''' )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
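
# Added note on the lazy pattern above: importing this package is cheap, and
# the sentencepiece-backed module is only loaded on first attribute access,
# e.g. (illustrative):
#
#   from transformers.models.gpt_sw3 import GPTSw3Tokenizer  # triggers the real import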
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__SCREAMING_SNAKE_CASE =[
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None ):
"""simple docstring"""
lowercase_ : List[Any] = True
while ask_again:
lowercase_ : str = input(_snake_case )
try:
if default is not None and len(_snake_case ) == 0:
return default
return convert_value(_snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_snake_case )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=[] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Optional[Any]=0 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = BulletMenu(_snake_case , _snake_case )
lowercase_ : List[str] = menu.run(default_choice=_snake_case )
return convert_value(_snake_case ) if convert_value is not None else result
def lowercase__( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : Union[str, Any] = int(_snake_case )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
lowercase_ : int = int(_snake_case )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = int(_snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : Any = int(_snake_case )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Any = int(_snake_case )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase ( argparse.RawDescriptionHelpFormatter ):
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = super()._format_usage(a_ ,a_ ,a_ ,a_ )
lowercase_ : Optional[Any] = usage.replace('<command> [<args>] ' ,'' )
return usage
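
# Added examples of the converters above (values index into the lists they
# wrap, assuming the enum members match those strings):
#
#   _convert_compute_environment("0")  # -> ComputeEnvironment("LOCAL_MACHINE")
#   _convert_mixed_precision("1")      # -> PrecisionType("fp16")
#   _convert_yes_no_to_bool("Yes")     # -> True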
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( lowercase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any:
'''simple docstring'''
super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : List[str] = chunk_length_s
lowercase_ : Tuple = overlap
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
lowercase_ : Optional[int] = True
lowercase_ : Optional[int] = bool(
isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ):
lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa )
elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowercase_ : List[str] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCamelCase ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio )
lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) )
lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio )
lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowercase_ : Union[str, Any] = 'max_length'
else:
lowercase_ : int = input_values
# normal padding on batch
if padded_inputs is None:
lowercase_ : int = self.pad(
__UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,)
if padding:
lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' )
lowercase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
lowercase_ : Optional[int] = example[..., None]
input_values.append(example.T )
lowercase_ : str = input_values
if return_tensors is not None:
lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase )
return padded_inputs
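
# Added usage sketch; illustrative only, since the class name above is
# reconstructed from context:
#
#   import numpy as np
#   extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = np.random.randn(24000).astype(np.float32)  # 1 s of mono audio
#   inputs = extractor(raw_audio=audio, sampling_rate=24000, return_tensors="np")
#   # inputs["input_values"] has shape (batch, channels, time) = (1, 1, 24000)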
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config


def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)

def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
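
# Added shape walk-through for the >= 2.0 branch above: with num_heads=2,
# num_splits=3 (q/k/v) and hidden_size=4, a [2*3*4, D] = [24, D] tensor is
# viewed as (2, 3, 4, D), transposed to (3, 2, 4, D) so the split axis leads,
# then flattened back to [24, D] - i.e. rows move from head-major to
# split-major order.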

def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict

def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
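
# Added usage sketch (illustrative):
#
#   config = BartConfig()        # facebook/bart-large style defaults
#   config.hidden_size           # -> 1024, resolved to d_model via attribute_map
#   onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
#   list(onnx_config.inputs)     # input_ids, attention_mask, decoder_input_ids,
#                                # decoder_attention_mask (plus past_* with use_past)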
'''simple docstring'''
import re
import string
import numpy as np
import datasets
snake_case__ = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
snake_case__ = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 371
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('''~'''), '''.cache''')
CACHE_DIR = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')
    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params / 1E6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
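# --- Editor's note: a standalone sketch (not part of the original script) of the
# key remapping performed inside _load_model above; the example key is hypothetical
# but follows the GPT-style layout that new_layer_name_dict targets.
def _demo_remap(key):
    unwanted_prefix = "_orig_mod."
    if key.startswith(unwanted_prefix):
        key = key[len(unwanted_prefix):]
    for old_layer_name in new_layer_name_dict:
        key = key.replace(old_layer_name, new_layer_name_dict[old_layer_name])
    return key


# _demo_remap("_orig_mod.transformer.h.0.attn.c_attn.weight")
#   -> "layers.0.attn.att_proj.weight"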
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters')
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape')
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError('initial and new outputs are not equal')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 107
| 0
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = '''config.json'''
WEIGHTS_NAME = '''diffusion_pytorch_model.bin'''
FLAX_WEIGHTS_NAME = '''diffusion_flax_model.msgpack'''
ONNX_WEIGHTS_NAME = '''model.onnx'''
SAFETENSORS_WEIGHTS_NAME = '''diffusion_pytorch_model.safetensors'''
ONNX_EXTERNAL_WEIGHTS_NAME = '''weights.pb'''
HUGGINGFACE_CO_RESOLVE_ENDPOINT = '''https://huggingface.co'''
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = '''diffusers_modules'''
HF_MODULES_CACHE = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
DEPRECATED_REVISION_ARGS = ['''fp16''', '''non-ema''']
TEXT_ENCODER_ATTN_MODULE = '''.self_attn'''
| 122
|
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first nth_term strings of the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
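# --- Editor's note: a quick worked example (not in the original file).
# p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]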
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 122
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 5_1_2,
"google/electra-base-generator": 5_1_2,
"google/electra-large-generator": 5_1_2,
"google/electra-small-discriminator": 5_1_2,
"google/electra-base-discriminator": 5_1_2,
"google/electra-large-discriminator": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
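# --- Editor's note: a self-contained sketch (not part of the original file)
# exercising the two helpers above without loading real tokenizer files; the
# ids 101/102 and the input ids are hypothetical.
class _FakeTok:
    cls_token_id = 101
    sep_token_id = 102


_fake = _FakeTok()
assert ElectraTokenizerFast.build_inputs_with_special_tokens(_fake, [5, 6], [7]) == [101, 5, 6, 102, 7, 102]
assert ElectraTokenizerFast.create_token_type_ids_from_sequences(_fake, [5, 6], [7]) == [0, 0, 0, 0, 1, 1]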
| 350
|
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher, character by character."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher via a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
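# --- Editor's note: a worked example (not in the original file). Atbash maps the
# n-th letter of the alphabet to the n-th letter from the end, so:
assert atbash("ABCDEFGH") == "ZYXWVUTS"
assert atbash("123GGjj") == "123TTqq"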
| 259
| 0
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase__ =logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class UpperCamelCase__ :
_SCREAMING_SNAKE_CASE : Optional[Any] = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_SCREAMING_SNAKE_CASE : Tuple = field(
default=__lowercase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_SCREAMING_SNAKE_CASE : int = field(
default=__lowercase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_SCREAMING_SNAKE_CASE : str = field(
default=__lowercase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class UpperCamelCase__ :
_SCREAMING_SNAKE_CASE : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
_SCREAMING_SNAKE_CASE : List[Any] = field(metadata={"help": "Should contain the data files for the task."} )
_SCREAMING_SNAKE_CASE : Tuple = field(
default=128 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
_SCREAMING_SNAKE_CASE : str = field(
default=__lowercase ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''',
        training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )

                results.update(result )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
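# --- Editor's note: a tiny illustration (not part of the original script) of the
# simple_accuracy metric used by compute_metrics above; the arrays are made up.
def _accuracy_demo():
    # 3 of the 4 positions agree -> 0.75
    return simple_accuracy(np.array([0, 1, 1, 2]), np.array([0, 1, 0, 2]))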
| 216
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
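# --- Editor's note (not in the original file): new code should instantiate the
# replacement class directly. A minimal sketch; the checkpoint name below is
# just an illustrative example.
#
#     processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")
#     inputs = processor(images=image, return_tensors="pt")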
| 204
| 0
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Amortization amount per month: A = p * r * (1 + r)^n / ((1 + r)^n - 1)."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
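# --- Editor's note: a worked check (not in the original file). Borrowing 25000
# at 12% annual interest over 3 years (36 monthly payments, r = 0.01/month)
# gives an installment of roughly 830.36 via the closed form above.
assert abs(equated_monthly_installments(25000, 0.12, 3) - 830.36) < 0.05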
| 356
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Print a 2D tensor through the logger."""
    logger.info("lv, h >\t" + "\t".join(F"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores from mask gradients."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
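# --- Editor's note: a toy, self-contained illustration (not part of the original
# script) of the estimator above: with a mask of ones, |d(loss)/d(mask)| per head
# serves as that head's importance. Shapes and the stand-in "loss" are made up.
def _head_importance_demo():
    demo_mask = torch.ones(2, 4, requires_grad=True)  # (layers, heads)
    demo_loss = (demo_mask * torch.randn(2, 4)).sum()  # stand-in for the LM loss
    demo_loss.backward()
    return demo_mask.grad.abs()  # same |gradient| reduction as above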
def mask_heads(args, model, eval_dataloader):
    """Mask heads in order of increasing importance until the score drops below
    args.masking_threshold * original score (see Michel et al., http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and measure the speed/score impact."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=__magic_name__ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=__magic_name__ , type=__magic_name__ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=__magic_name__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=__magic_name__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=__magic_name__ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=__magic_name__ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=__magic_name__ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=__magic_name__ , help="Batch size." )
parser.add_argument("--seed" , type=__magic_name__ , default=42 )
parser.add_argument("--local_rank" , type=__magic_name__ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=__magic_name__ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__magic_name__ , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__magic_name__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 42
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Any = NllbTokenizer
A_ : Tuple = NllbTokenizerFast
A_ : Union[str, Any] = True
A_ : List[str] = True
A_ : int = {}
def a (self : Any ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = NllbTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = NllbTokenizer(a__ , keep_accents=a__ )
__snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = self.tokenizer_class.from_pretrained(a__ , **a__ )
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(a__ )
__snake_case = tokenizer_p.save_pretrained(a__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__snake_case = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(a__ , a__ )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(a__ )
__snake_case = tokenizer_p.from_pretrained(a__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a__ , a__ ) )
shutil.rmtree(a__ )
# Save tokenizer rust, legacy_format=True
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ )
__snake_case = tokenizer_p.save_pretrained(a__ )
# Checks it save with the same files
self.assertSequenceEqual(a__ , a__ )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(a__ )
__snake_case = tokenizer_p.from_pretrained(a__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a__ , a__ ) )
shutil.rmtree(a__ )
# Save tokenizer rust, legacy_format=False
__snake_case = tempfile.mkdtemp()
__snake_case = tokenizer_r.save_pretrained(a__ , legacy_format=a__ )
__snake_case = tokenizer_p.save_pretrained(a__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case = tokenizer_r.from_pretrained(a__ )
__snake_case = tokenizer_p.from_pretrained(a__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a__ , a__ ) )
shutil.rmtree(a__ )
@require_torch
def a (self : Optional[Any] ):
"""simple docstring"""
if not self.test_seqaseq:
return
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
__snake_case = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
__snake_case = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
__snake_case = tokenizer.prepare_seqaseq_batch(
src_texts=a__ , tgt_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__snake_case = tokenizer.prepare_seqaseq_batch(
a__ , tgt_texts=a__ , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__snake_case = tokenizer.prepare_seqaseq_batch(
src_texts=a__ , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , a__ )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def a (self : List[str] ):
"""simple docstring"""
pass
def a (self : int ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case = [AddedToken('''<special>''' , lstrip=a__ )]
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , **a__ )
__snake_case = tokenizer_r.encode('''Hey this is a <special> token''' )
__snake_case = tokenizer_r.encode('''<special>''' , add_special_tokens=a__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__snake_case = self.rust_tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , **a__ , )
__snake_case = self.tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , **a__ )
__snake_case = tokenizer_p.encode('''Hey this is a <special> token''' )
__snake_case = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , a__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : List[Any] = 'facebook/nllb-200-distilled-600M'
A_ : List[str] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
A_ : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
A_ : List[Any] = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
def a (cls : Tuple ):
"""simple docstring"""
__snake_case = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
__snake_case = 1
return cls
def a (self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 25_6057 )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
self.assertIn(a__ , self.tokenizer.all_special_ids )
# fmt: off
__snake_case = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__snake_case = self.tokenizer.decode(a__ , skip_special_tokens=a__ )
__snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a__ )
self.assertEqual(a__ , a__ )
self.assertNotIn(self.tokenizer.eos_token , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , a__ )
__snake_case = 10
__snake_case = self.tokenizer(a__ , max_length=a__ , truncation=a__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , a__ )
self.assertEqual(len(a__ ) , a__ )
def a (self : Tuple ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_6203, 3] )
def a (self : Any ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a__ )
__snake_case = NllbTokenizer.from_pretrained(a__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a__ )
@require_torch
def a (self : str ):
"""simple docstring"""
__snake_case = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__snake_case = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(a__ , a__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a__ )
self.assertEqual(a__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.tokenizer(self.src_text , padding=a__ , truncation=a__ , max_length=3 , return_tensors='''pt''' )
__snake_case = self.tokenizer(
text_target=self.tgt_text , padding=a__ , truncation=a__ , max_length=10 , return_tensors='''pt''' )
__snake_case = targets['''input_ids''']
__snake_case = shift_tokens_right(
a__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a (self : str ):
"""simple docstring"""
__snake_case = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(a__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[25_6047, 70, 7356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_6057,
} , )
@require_torch
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = True
__snake_case = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__snake_case = False
__snake_case = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
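# --- Editor's note (not part of the original test file): the two assertions above
# hinge on where the source-language code token goes. A schematic helper; the ids
# passed in are arbitrary and purely illustrative.
def _schematic_nllb_inputs(token_ids, lang_code_id, eos_id=2, legacy=False):
    # legacy_behaviour=True appends [eos, lang_code]; the new default prefixes the code.
    if legacy:
        return token_ids + [eos_id, lang_code_id]
    return [lang_code_id] + token_ids + [eos_id]


assert _schematic_nllb_inputs([16297], 256047, legacy=True) == [16297, 2, 256047]
assert _schematic_nllb_inputs([16297], 256047) == [256047, 16297, 2]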
| 24
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
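# --- Editor's note: a usage sketch (not in the original file). The graph is an
# adjacency matrix; a triangle needs 3 colors, so 2 colors must fail.
_triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(_triangle, 3) == [0, 1, 2]
assert color(_triangle, 2) == []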
| 321
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowercase__ :
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
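# A minimal sketch of the slice-comparison pattern used above (the numbers are
# illustrative, not from the test): compare only a small corner of a large
# output tensor against hard-coded reference values, within a tolerance.
#
#     reference = np.array([[0.1, 0.2], [0.3, 0.4]])
#     candidate = reference + 1e-5
#     assert np.allclose(candidate, reference, atol=1e-4)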
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    '''Greedy fractional knapsack: sort items by value/weight ratio and fill capacity w.'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
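    # Illustrative call (item values, weights and the capacity are made-up
    # numbers, not from the original file): with capacity 50 the greedy optimum
    # is 240.0, taking all of items 1 and 2 plus two thirds of item 3.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0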
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
"""simple docstring"""
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=self.is_encoder_decoder, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    vocab_size = 99
    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
super().setUp()
a = "facebook/opt-350m"
def __UpperCAmelCase ( self : Any ) -> Tuple:
a = TFOPTForCausalLM.from_pretrained(self.path_model )
a = GPTaTokenizer.from_pretrained(self.path_model )
a = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
a = tokenizer(__lowerCamelCase , return_tensors="tf" , padding=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
a = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
a = tf.function(__lowerCamelCase , jit_compile=__lowerCamelCase )
a = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
@require_tf
@slow
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
a = "facebook/opt-125m"
a = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a = []
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
a = tokenizer(__lowerCamelCase , return_tensors="tf" ).input_ids
a = model.generate(__lowerCamelCase , max_length=10 )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> Dict:
a = "facebook/opt-350m"
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
a = "left"
# use different length sentences to test batching
a = [
"Hello, my dog is a little",
"Today, I",
]
a = tokenizer(__lowerCamelCase , return_tensors="tf" , padding=__lowerCamelCase )
a = inputs["input_ids"]
a = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["attention_mask"] )
a = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=__lowerCamelCase )
a = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
a = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
a = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
a = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
a = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
a = "facebook/opt-350m"
a = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
a = []
a = GPTaTokenizer.from_pretrained(__lowerCamelCase )
a = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
a = tokenizer(__lowerCamelCase , return_tensors="tf" ).input_ids
a = model.generate(__lowerCamelCase , max_length=10 )
a = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    '''Vertex of a weighted, undirected graph.'''
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}
    def __lt__(self, other):
        return self.key < other.key
    def __repr__(self):
        return self.id
    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph: list, a: int, b: int, edge: int) -> None:
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
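    # Illustrative run (this 4-vertex weighted graph is a made-up example, not
    # part of the original file): a square with edge weights 1, 2, 1, 4; both
    # variants should report the same minimum spanning tree edges.
    g = [Vertex(i) for i in range(4)]
    connect(g, 1, 2, 1)
    connect(g, 2, 3, 2)
    connect(g, 3, 4, 1)
    connect(g, 1, 4, 4)
    print(prim(g, g[0]))             # [(2, 1), (3, 2), (4, 3)]
    print(list(prim_heap(g, g[0])))  # same edges from the heap-based variant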
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
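# For reference, a minimal sketch of the lazy-import idea used above (the module
# name below is a placeholder, not the real transformers internals): resolve
# submodule attributes on first access instead of importing everything eagerly.
#
#     import importlib
#
#     def __getattr__(name):
#         module = importlib.import_module(".modeling_table_transformer", __name__)
#         return getattr(module, name)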
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A__ : List[Any] = get_tests_dir('''fixtures''')
class __snake_case ( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''')
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''')
        config = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''', subfolder='''feature_extractor''')
        self.assertIsNotNone(config)
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls : Optional[int]):
lowerCAmelCase_ : Union[str, Any] = TOKEN
HfFolder.save_token(A_)
@classmethod
def UpperCAmelCase__ ( cls : int):
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''')
except HTTPError:
pass
def UpperCAmelCase__ ( self : Any):
lowerCAmelCase_ : List[str] = ViTImageProcessor.from_pretrained(A_)
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token)
lowerCAmelCase_ : List[str] = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_))
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='''test-image-processor''' , push_to_hub=A_ , use_auth_token=self._token)
lowerCAmelCase_ : Tuple = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_))
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : Dict = ViTImageProcessor.from_pretrained(A_)
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token)
lowerCAmelCase_ : int = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''')
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A_ , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=A_ , use_auth_token=self._token)
lowerCAmelCase_ : Dict = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''')
for k, v in image_processor.__dict__.items():
self.assertEqual(A_ , getattr(A_ , A_))
def UpperCAmelCase__ ( self : Union[str, Any]):
CustomImageProcessor.register_for_auto_class()
lowerCAmelCase_ : Optional[int] = CustomImageProcessor.from_pretrained(A_)
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
lowerCAmelCase_ : List[Any] = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=A_)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''')
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
__snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'git_vision_model'
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''') == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'git'
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ):
    """
    Given exactly two of stress, tangential force and area (pass 0 for the
    unknown quantity), compute the missing one.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
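    # Illustrative calls (the numbers are made up, not from the original file):
    # passing 0 for the unknown solves for it, e.g. 100 N over 20 m^2 -> 5 Pa.
    print(shear_stress(stress=0, tangential_force=100, area=20))  # ('stress', 5.0)
    print(shear_stress(stress=25, tangential_force=100, area=0))  # ('area', 4.0)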
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
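    # Illustrative checks (made-up inputs): "A" -> 1, "AB" -> 28, "ZZ" -> 702.
    print(excel_title_to_column("AB"))  # 28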
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase : Optional[Any] = False
class __UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images
        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
    def test_versatile_diffusion_text2img_pipeline(self):
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
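# The generator above boils down to this pattern (the path below is a
# hypothetical example): unpickle each file into a pandas DataFrame, then hand
# it to Arrow as a table.
#
#     import pandas as pd
#     import pyarrow as pa
#
#     table = pa.Table.from_pandas(pd.read_pickle("data/part-0.pkl"))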
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int, ) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp_val = (temp + val) / denom
            new_val.append(temp_val)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
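    # Illustrative run (this strictly diagonally dominant 2x2 system is a
    # made-up example, not from the original file): solves 4x + y = 2 and
    # x + 3y = -6, converging to roughly [1.0909, -2.3636].
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5], iterations=50))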
def climb_stairs(number_of_steps: int) -> int:
    """Number of distinct ways to climb `number_of_steps` stairs taking 1 or 2 at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
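    # Illustrative values (Fibonacci-style growth): 1 step -> 1 way, 3 -> 3, 5 -> 8.
    print(climb_stairs(5))  # 8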
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
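A brief usage sketch: the config can be instantiated with defaults and individual hyper-parameters overridden by keyword, as with any PretrainedConfig subclass.

config = RealmConfig(searcher_beam_size=1000)
print(config.hidden_size, config.num_block_records)  # 768 13353718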
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to give every node exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
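A small worked example for the routine above: a root holding 3 coins with two empty children must push one coin to each child, so two moves.

tree = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(tree))  # 2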
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    """Fetch today's quote from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    """Fetch a random quote from the ZenQuotes API."""
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
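Both helpers return the decoded JSON list from the public ZenQuotes endpoints; a minimal sketch of consuming it, assuming the service's current field names ('q' for the text, 'a' for the author):

for quote in random_quotes():
    print(f"{quote.get('q')} - {quote.get('a')}")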
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Directed weighted graph stored as an adjacency list: {u: [[w, v], ...]}."""
def __init__( self )->Tuple:
'''simple docstring'''
A_ : Any = {}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )->Optional[Any]:
'''simple docstring'''
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A_ : Any = [[w, v]]
if not self.graph.get(SCREAMING_SNAKE_CASE__ ):
A_ : int = []
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )->Optional[int]:
'''simple docstring'''
if s == d:
return []
A_ : Optional[int] = []
A_ : int = []
if s == -2:
A_ : Optional[Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : Dict = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : List[str] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Optional[int] = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )->int:
'''simple docstring'''
if c == -1:
c = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
n = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )->Tuple:
'''simple docstring'''
A_ : Tuple = deque()
A_ : List[Any] = []
if s == -2:
A_ : int = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
while d:
A_ : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
A_ : List[str] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )->Union[str, Any]:
'''simple docstring'''
A_ : List[str] = []
A_ : Dict = []
if s == -2:
A_ : int = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : int = s
A_ : Tuple = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Any = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : Any = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Optional[int] = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return sorted_nodes
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Optional[int] = []
A_ : int = []
A_ : str = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : Dict = -2
A_ : Optional[Any] = []
A_ : Dict = s
A_ : Tuple = False
A_ : Any = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Dict = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Tuple = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : List[Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Optional[Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
A_ : str = s
A_ : Tuple = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return list(SCREAMING_SNAKE_CASE__ )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : Dict = []
A_ : List[Any] = []
A_ : Dict = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : str = -2
A_ : Optional[Any] = []
A_ : Any = s
A_ : Tuple = False
A_ : Dict = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : int = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : List[Any] = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : List[Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Union[str, Any] = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
A_ : Optional[int] = s
A_ : Optional[int] = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return False
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )->str:
'''simple docstring'''
A_ : Optional[int] = time()
self.dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A_ : Tuple = time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )->Tuple:
'''simple docstring'''
A_ : str = time()
self.bfs(SCREAMING_SNAKE_CASE__ )
A_ : Optional[Any] = time()
return end - begin
class Graph:
    """Undirected weighted graph; every edge is stored in both directions."""
def __init__( self )->Dict:
'''simple docstring'''
A_ : List[Any] = {}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 )->Optional[Any]:
'''simple docstring'''
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A_ : str = [[w, v]]
# add the other way
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
A_ : Union[str, Any] = [[w, u]]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(SCREAMING_SNAKE_CASE__ )
# the other way round
if self.graph.get(SCREAMING_SNAKE_CASE__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(SCREAMING_SNAKE_CASE__ )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )->Optional[Any]:
'''simple docstring'''
if s == d:
return []
A_ : Optional[Any] = []
A_ : Union[str, Any] = []
if s == -2:
A_ : Any = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : Union[str, Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(SCREAMING_SNAKE_CASE__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : Any = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Optional[Any] = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-1 )->Optional[Any]:
'''simple docstring'''
if c == -1:
c = floor(random() * 1_0000 ) + 10
for i in range(SCREAMING_SNAKE_CASE__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
n = floor(random() * c ) + 1
if n != i:
self.add_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = deque()
A_ : Union[str, Any] = []
if s == -2:
A_ : int = list(self.graph )[0]
d.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
while d:
A_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
return len(self.graph[u] )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : List[str] = []
A_ : Dict = []
A_ : Union[str, Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : List[str] = -2
A_ : Any = []
A_ : List[Any] = s
A_ : Optional[int] = False
A_ : Union[str, Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : List[Any] = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : List[Any] = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Dict = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
A_ : int = s
A_ : Tuple = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return list(SCREAMING_SNAKE_CASE__ )
def _snake_case ( self )->Any:
'''simple docstring'''
A_ : int = []
A_ : Union[str, Any] = []
A_ : Optional[Any] = list(self.graph )[0]
stack.append(SCREAMING_SNAKE_CASE__ )
visited.append(SCREAMING_SNAKE_CASE__ )
A_ : Optional[Any] = -2
A_ : str = []
A_ : int = s
A_ : int = False
A_ : Any = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Optional[Any] = len(SCREAMING_SNAKE_CASE__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : List[Any] = True
if len(SCREAMING_SNAKE_CASE__ ) != 0:
A_ : int = stack[len(SCREAMING_SNAKE_CASE__ ) - 1]
else:
A_ : Optional[int] = False
indirect_parents.append(SCREAMING_SNAKE_CASE__ )
A_ : str = s
A_ : Dict = ss
# check if we have reached the starting point
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return False
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
return list(self.graph )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 , _SCREAMING_SNAKE_CASE=-1 )->Optional[int]:
'''simple docstring'''
A_ : Optional[int] = time()
self.dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A_ : str = time()
return end - begin
def _snake_case ( self , _SCREAMING_SNAKE_CASE=-2 )->Dict:
'''simple docstring'''
A_ : str = time()
self.bfs(SCREAMING_SNAKE_CASE__ )
A_ : str = time()
return end - begin
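A hedged usage sketch for the classes above, assuming the methods keep their original TheAlgorithms names (add_pair, all_nodes, dfs, bfs), which the internal timing helpers already call:

g = DirectedGraph()
g.add_pair(0, 1, 5)  # edge 0 -> 1 with weight 5
g.add_pair(1, 2, 3)
g.add_pair(0, 2, 1)
print(g.all_nodes())  # [0, 1, 2]
print(g.dfs(0, 2))    # one depth-first path from 0 to 2
print(g.bfs(0))       # breadth-first visit order from 0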
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
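Migrating off the deprecated class is a one-line change; a brief sketch (the checkpoint id is illustrative):

from transformers import SegformerImageProcessor

# Drop-in replacement for the deprecated feature extractor
image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")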
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
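The lazy module defers the heavy imports until an attribute is actually touched; a sketch of the behaviour this `__init__.py` enables (torch is only imported on the last line):

from transformers.models import glpn

config = glpn.GLPNConfig()                    # resolves configuration_glpn lazily
model = glpn.GLPNForDepthEstimation(config)   # resolves modeling_glpn, pulling in torch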
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
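A hedged invocation sketch; the flag names below come from TensorFlowBenchmarkArguments and may differ across transformers versions:

# Typical command line (script and flag names may vary by version):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128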
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", key)[1])
layer_number -= 3
return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
# Construct model
if bloom_config_file == "":
snake_case_ : int = BloomConfig()
else:
snake_case_ : List[str] = BloomConfig.from_json_file(_a )
if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
snake_case_ : List[str] = {'''weight_map''': {}, '''metadata''': {}}
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = None
snake_case_ : List[str] = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
snake_case_ : Dict = None
for i in range(_a ):
# load all TP files
snake_case_ : Union[str, Any] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : List[str] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : Any = temp.pop(_a )
if tensors is None:
snake_case_ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
snake_case_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Any = tensors[key] / pretraining_tp
torch.save(
_a , os.path.join(
_a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case_ : List[str] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) )
snake_case_ : int = BloomConfig()
snake_case_ : Any = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
snake_case_ : Dict = total_size
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ : Tuple = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
f.write(_a )
else:
snake_case_ : Union[str, Any] = BloomModel(_a )
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
snake_case_ : List[Any] = None
        for i, file in enumerate(file_names):
snake_case_ : Optional[Any] = None
for i in range(_a ):
# load all TP files
snake_case_ : List[str] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : Optional[Any] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : str = temp.pop(_a )
if tensors is None:
snake_case_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
snake_case_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Union[str, Any] = tensors[key] / pretraining_tp
snake_case_ : Any = model.load_state_dict(_a , strict=_a )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
snake_case_ : Optional[int] = set(other_keys.missing_keys )
else:
snake_case_ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_a , exist_ok=_a )
snake_case_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
snake_case_ : Optional[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _a )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
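An example invocation using the flags declared above; the script file name and paths are illustrative, not taken from the source.

# python convert_bloom_original_checkpoint_to_pytorch.py \
#     --bloom_checkpoint_path /path/to/megatron_checkpoints \
#     --pytorch_dump_folder_path /path/to/output \
#     --shard_model --pretraining_tp 4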
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
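A worked 2x2 check: for [[4, 7], [2, 6]] the determinant is 4*6 - 7*2 = 10, so the inverse is (1/10) * [[6, -7], [-2, 4]].

print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))
# [[0.6, -0.7], [-0.2, 0.4]]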
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCAmelCase = 16
_UpperCAmelCase = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Evaluate on eval_dataloader, de-duplicating the last distributed batch."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
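A hedged run sketch using the flags declared above (the script file name is illustrative); each epoch writes an epoch_<n> checkpoint folder plus a state_<n>.json, and a second run can resume from any of them:

# python checkpointing_test.py --num_epochs 2 --output_dir ./ckpts
# python checkpointing_test.py --resume_from_checkpoint ./ckpts/epoch_0 --output_dir ./ckpts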
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """Count square laminae that use no more than `limit` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
print(F"{solution() = }")
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
        scaler_handler = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)

    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Check if `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")
    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )
    return pt_model
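# Shape convention behind the "linear layer" branches above, shown in isolation: a
# PyTorch ``nn.Linear(in, out)`` stores ``weight`` as (out, in), while a Flax ``Dense``
# kernel is (in, out), hence the ``.T``. Minimal sketch (not part of the original file):
#   import numpy as np
#   pt_weight = np.zeros((8, 4))   # nn.Linear(4, 8).weight, shape (out, in)
#   flax_kernel = pt_weight.T      # stored under the 'kernel' key, shape (4, 8)
#   assert flax_kernel.shape == (4, 8)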
| 142
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # make a state dict with torch tensors
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
_A : str = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
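# Example invocation (script name and paths are placeholders, a sketch only):
#   python convert_umt5_checkpoint.py \
#     --t5x_checkpoint_path /path/to/t5x_checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output_dir \
#     --scalable_attention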
| 142
| 1
|
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: column ``col`` collects every ``key``-th
    character of the message, starting at offset ``col``."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Reverse the transposition by writing the ciphertext back into a
    ``key`` x ``num_cols`` grid, skipping the trailing shaded (unused) boxes."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
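# Worked round-trip example for the functions above (verifiable by hand):
#   encrypt_message(8, "Common sense is not so common.")
#   -> 'Cenoonommstmme oo snnio. s s c'
#   decrypt_message(8, 'Cenoonommstmme oo snnio. s s c')
#   -> 'Common sense is not so common.'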
| 355
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list of shape ``shape``."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=4_00 , lowerCAmelCase=20_00 , lowerCAmelCase=20_48 , lowerCAmelCase=1_28 , lowerCAmelCase=1 , lowerCAmelCase=5_12 , lowerCAmelCase=30 , lowerCAmelCase=4_41_00 , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = min_seq_length
snake_case = max_seq_length
snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case = spectrogram_length
snake_case = feature_size
snake_case = num_audio_channels
snake_case = hop_length
snake_case = chunk_length
snake_case = sampling_rate
def snake_case ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case ( self , lowerCAmelCase=False , lowerCAmelCase=False ):
"""simple docstring"""
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case = [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = TvltFeatureExtractor
def snake_case ( self ):
"""simple docstring"""
snake_case = TvltFeatureExtractionTester(self )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'hop_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'chunk_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate' ) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = feat_extract_first.save_pretrained(lowerCAmelCase )[0]
check_json_file_has_correct_format(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_pretrained(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = os.path.join(lowerCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_json_file(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case = feature_extractor(
lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case = np.asarray(lowerCAmelCase )
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case = ds.sort('id' ).select(range(lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case ( self ):
"""simple docstring"""
snake_case = self._load_datasamples(1 )
snake_case = TvltFeatureExtractor()
snake_case = feature_extractor(lowerCAmelCase , return_tensors='pt' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) )
snake_case = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCAmelCase , atol=1E-4 ) )
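# Shape convention exercised throughout the tests above: TvltFeatureExtractor
# returns ``audio_values`` of rank 4, (batch, num_audio_channels, time frames,
# feature_size); the integration check expects (1, 1, 192, 128), i.e. one clip,
# one channel, 192 spectrogram frames over 128 mel bins.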
| 149
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
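# Usage sketch for the aliases above (alias names as reconstructed above; the
# function is hypothetical, for illustration only):
#   def read_all(paths: ListLike[PathLike]) -> None: ...
#   read_all(["a.txt", "b.txt"])   # a List[str] satisfies ListLike
#   read_all(("a.txt", "b.txt"))   # and so does a Tuple[str, ...]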
| 260
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
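# Sanity check on the expectation above: the eight expected edges span all nine
# nodes and have total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the minimum
# spanning tree weight for this graph.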
| 49
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( A__ ):
'''simple docstring'''
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_A , '''depth_multiplier''' ) )
class lowercase :
'''simple docstring'''
def __init__( self , _snake_case , _snake_case=13 , _snake_case=3 , _snake_case=32 , _snake_case=0.25 , _snake_case=8 , _snake_case=True , _snake_case=1024 , _snake_case=32 , _snake_case="relu6" , _snake_case=0.1 , _snake_case=0.02 , _snake_case=True , _snake_case=True , _snake_case=10 , _snake_case=None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = depth_multiplier
UpperCAmelCase = min_depth
UpperCAmelCase = tf_padding
UpperCAmelCase = int(last_hidden_size * depth_multiplier )
UpperCAmelCase = output_stride
UpperCAmelCase = hidden_act
UpperCAmelCase = classifier_dropout_prob
UpperCAmelCase = use_labels
UpperCAmelCase = is_training
UpperCAmelCase = num_labels
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self ) -> int:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = MobileNetVaModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = MobileNetVaForImageClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = MobileNetVaModelTester(self )
UpperCAmelCase = MobileNetVaConfigTester(self , config_class=_A , has_text_modality=_A )
def snake_case_ ( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(_A )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
UpperCAmelCase = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = 26
self.assertEqual(len(_A ) , _A )
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(_A , _A , _A )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def snake_case_ ( self ) -> int:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = MobileNetVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ) -> str:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_A )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**_A )
# verify the logits
UpperCAmelCase = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
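# Note on torch.Size((1, 1001)) above: Google's MobileNetV1 checkpoints keep the
# original TensorFlow label space of 1001 classes (ImageNet-1k plus a background
# class), hence 1001 logits rather than 1000.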
| 357
|
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the shift-``key`` decryption of
    ``message`` for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
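# Worked example for decrypt() above: feeding it 'KHOOR ZRUOG' prints one
# candidate per key, and the readable plaintext appears at key 3:
#   Decryption using Key #3: HELLO WORLD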
| 152
| 0
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = ort.SessionOptions()
UpperCamelCase = False
return options
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
UpperCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
UpperCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
UpperCamelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A red cat sitting on a park bench"""
UpperCamelCase = np.random.RandomState(0 )
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCamelCase_ , output_type="""np""" , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
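# The gpu_provider tuple above follows onnxruntime's (name, options) form for
# InferenceSession providers; "gpu_mem_limit" is expressed in bytes, so
# "15000000000" caps the CUDA memory arena at roughly 15 GB, and
# "kSameAsRequested" grows the arena only by the requested amount.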
| 343
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
__lowerCAmelCase = """swin"""
__lowerCAmelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = embed_dim
UpperCamelCase = depths
UpperCamelCase = len(lowerCamelCase_ )
UpperCamelCase = num_heads
UpperCamelCase = window_size
UpperCamelCase = mlp_ratio
UpperCamelCase = qkv_bias
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = drop_path_rate
UpperCamelCase = hidden_act
UpperCamelCase = use_absolute_embeddings
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
__lowerCAmelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
return 1E-4
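# Quick check of the derived attribute in the config above (a sketch, assuming
# the default arguments; the config class name is obfuscated in this snippet):
#   with depths [2, 2, 6, 2] there are 4 stages, so
#   hidden_size == int(96 * 2 ** (4 - 1)) == 768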
| 343
| 1
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """Returns the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # 1 ends the all-ones chain
CHAINS[57] = False  # 58 leads to 89
def chain(number: int) -> bool:
    """Returns True if the chain starting at ``number`` reaches 1, False if it
    reaches 89; caches the result for ``number`` and its trailing-zero multiples,
    which share the same digit-square sum."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    """Counts how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{solution() = }")
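# Example chains from the Project Euler 92 problem statement:
#   44 -> 32 -> 13 -> 10 -> 1 -> 1
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89
# so next_number(44) == 32 and chain(85) is False (it reaches 89).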
| 236
|
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
snake_case_ : List[str] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
snake_case_ : Tuple = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
snake_case_ : str = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
snake_case_ : str = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
snake_case_ : List[Any] = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) ,homepage='https://github.com/openai/human-eval' ,codebase_urls=['https://github.com/openai/human-eval'] ,reference_urls=['https://github.com/openai/human-eval'] ,license=_LICENSE ,)
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any]=[1, 10, 100] ,lowerCamelCase__ : int=4 ,lowerCamelCase__ : List[Any]=3.0 ):
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' ,0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowerCamelCase__ ) as executor:
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[int] = Counter()
_UpperCamelCase : int = 0
_UpperCamelCase : Dict = defaultdict(lowerCamelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ) ):
for candidate in candidates:
_UpperCamelCase : int = candidate + '\n' + test_case
_UpperCamelCase : Optional[Any] = (test_program, timeout, task_id, completion_id[task_id])
_UpperCamelCase : Dict = executor.submit(lowerCamelCase__ ,*lowerCamelCase__ )
futures.append(lowerCamelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase__ ):
_UpperCamelCase : Dict = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
_UpperCamelCase , _UpperCamelCase : List[str] = [], []
for result in results.values():
result.sort()
_UpperCamelCase : Optional[Any] = [r[1]['passed'] for r in result]
total.append(len(lowerCamelCase__ ) )
correct.append(sum(lowerCamelCase__ ) )
_UpperCamelCase : List[str] = np.array(lowerCamelCase__ )
_UpperCamelCase : List[Any] = np.array(lowerCamelCase__ )
_UpperCamelCase : Tuple = k
_UpperCamelCase : Tuple = {F'pass@{k}': estimate_pass_at_k(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
def estimator(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : int = itertools.repeat(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
else:
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = iter(UpperCAmelCase_ )
return np.array([estimator(int(UpperCAmelCase_ ) , int(UpperCAmelCase_ ) , UpperCAmelCase_ ) for n, c in zip(UpperCAmelCase_ , UpperCAmelCase_ )] )
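# Worked example for the pass@k estimator above: with n = 2 candidates of which
# c = 1 passes, pass@1 = 1 - prod(1 - 1 / arange(2, 3)) = 1 - (1 - 1/2) = 0.5,
# matching the {'pass@1': 0.5} result shown in the metric docstring.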
| 236
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : List[str] = logging.get_logger(__name__)
__A : int = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : int = "deta"
SCREAMING_SNAKE_CASE_ : List[str] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , A : Optional[int]=None , A : Union[str, Any]=9_00 , A : Tuple=20_48 , A : int=6 , A : str=20_48 , A : Any=8 , A : Optional[int]=6 , A : Dict=10_24 , A : str=8 , A : Dict=0.0 , A : Union[str, Any]=True , A : List[Any]="relu" , A : Tuple=2_56 , A : Optional[int]=0.1 , A : int=0.0 , A : str=0.0 , A : List[Any]=0.02 , A : Union[str, Any]=1.0 , A : str=True , A : str=False , A : Optional[int]="sine" , A : Optional[Any]=5 , A : str=4 , A : Union[str, Any]=4 , A : Tuple=True , A : Union[str, Any]=3_00 , A : Optional[Any]=True , A : int=True , A : Dict=1 , A : Tuple=5 , A : Optional[Any]=2 , A : Optional[Any]=1 , A : Any=1 , A : int=5 , A : Optional[Any]=2 , A : List[str]=0.1 , A : Dict=0.25 , **A : Tuple , ) -> Dict:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase_ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(A , A ):
lowercase_ : List[str] = backbone_config.pop('''model_type''' )
lowercase_ : List[str] = CONFIG_MAPPING[backbone_model_type]
lowercase_ : Union[str, Any] = config_class.from_dict(A )
lowercase_ : List[str] = backbone_config
lowercase_ : Optional[int] = num_queries
lowercase_ : str = max_position_embeddings
lowercase_ : Any = d_model
lowercase_ : Optional[Any] = encoder_ffn_dim
lowercase_ : List[str] = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : int = decoder_ffn_dim
lowercase_ : List[Any] = decoder_layers
lowercase_ : int = decoder_attention_heads
lowercase_ : Optional[Any] = dropout
lowercase_ : Tuple = attention_dropout
lowercase_ : str = activation_dropout
lowercase_ : List[str] = activation_function
lowercase_ : int = init_std
lowercase_ : Dict = init_xavier_std
lowercase_ : List[Any] = encoder_layerdrop
lowercase_ : str = auxiliary_loss
lowercase_ : Dict = position_embedding_type
# deformable attributes
lowercase_ : Union[str, Any] = num_feature_levels
lowercase_ : Optional[int] = encoder_n_points
lowercase_ : Dict = decoder_n_points
lowercase_ : Tuple = two_stage
lowercase_ : Union[str, Any] = two_stage_num_proposals
lowercase_ : Tuple = with_box_refine
lowercase_ : Optional[int] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase_ : Optional[Any] = class_cost
lowercase_ : Dict = bbox_cost
lowercase_ : Optional[int] = giou_cost
# Loss coefficients
lowercase_ : Optional[int] = mask_loss_coefficient
lowercase_ : Optional[Any] = dice_loss_coefficient
lowercase_ : Dict = bbox_loss_coefficient
lowercase_ : int = giou_loss_coefficient
lowercase_ : Union[str, Any] = eos_coefficient
lowercase_ : Dict = focal_alpha
super().__init__(is_encoder_decoder=A , **A )
@property
def A ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def A ( self : Optional[int] ) -> int:
return self.d_model
def A ( self : List[Any] ) -> Dict:
lowercase_ : str = copy.deepcopy(self.__dict__ )
lowercase_ : Union[str, Any] = self.backbone_config.to_dict()
lowercase_ : List[Any] = self.__class__.model_type
return output
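# --- usage sketch (editor's addition) ------------------------------------------
# A default DetaConfig builds its own ResNet backbone config, and attribute_map
# lets generic code read `hidden_size` even though DETA stores it as `d_model`.
# Assumes a transformers installation that ships the DETA model.
#
#   >>> config = DetaConfig(num_queries=300)
#   >>> config.hidden_size == config.d_model
#   True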
"""simple docstring"""
def lowercase ( __snake_case : list[int] ):
lowercase_ : List[Any] = len(__snake_case )
for i in range(__snake_case ):
for j in range(i + 1 , __snake_case ):
if numbers[j] < numbers[i]:
lowercase_ , lowercase_ : Optional[int] = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__A : int = input('''Enter numbers separated by a comma:\n''').strip()
__A : Any = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
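# Example (editor's addition): exchange_sort([5, 3, 1, 2]) returns [1, 2, 3, 5].
# Like selection sort this is O(n^2) comparisons, but it swaps as soon as it
# sees a smaller element instead of tracking a minimum index.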
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings that differ in at most one position.

    Returns the merged string with the differing bit replaced by '_',
    or False when the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent minterms until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge absorbs both terms; keep the merged term
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether `string1` covers `string2`, given its number of '_' wildcards."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # take the essential implicants and clear every column they cover
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick implicants covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the chart marking which prime implicant covers which minterm."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    # read the minterms as ints so decimal_to_binary yields clean bit strings
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
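# --- worked example (editor's addition) -----------------------------------------
# For 3 variables and minterms {1, 3, 7}:
#   >>> decimal_to_binary(3, [1, 3, 7])
#   ['001', '011', '111']
#   >>> sorted(check(['001', '011', '111']))
#   ['0_1', '_11']
# '0_1' is the only implicant covering 001 and '_11' the only one covering 111,
# so selection() reports both as essential prime implicants.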
def solution(limit: int = 50000000) -> int:
    """
    Return the count of numbers below `limit` expressible as the sum of a
    prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"{solution() = }")
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[int]): [min, max] range to sample the target short edge from
            max_size (int): maximum allowed longest edge length
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
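# --- usage sketch (editor's addition) -------------------------------------------
# _clip_box clamps a batch of (x1, y1, x2, y2) boxes to an image size in place:
#
#   >>> boxes = torch.tensor([[-5.0, 10.0, 700.0, 500.0]])
#   >>> _clip_box(boxes, (480, 640))
#   >>> boxes
#   tensor([[  0.,  10., 640., 480.]])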
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_UpperCamelCase: List[Any] = logging.get_logger(__name__)
class a__ :
_lowerCamelCase = None
@experimental
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return _map_with_joblib(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase : List[str] = num_proc if num_proc <= len(_UpperCAmelCase ) else len(_UpperCAmelCase )
lowercase : Union[str, Any] = [] # We organize the splits ourselve (contiguous splits)
for index in range(_UpperCAmelCase ):
lowercase : str = len(_UpperCAmelCase ) // num_proc
lowercase : Optional[Any] = len(_UpperCAmelCase ) % num_proc
lowercase : List[Any] = div * index + min(_UpperCAmelCase , _UpperCAmelCase )
lowercase : List[str] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_UpperCAmelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(_UpperCAmelCase )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(_UpperCAmelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
lowercase , lowercase : Optional[Any] = None, None
if not disable_tqdm:
lowercase , lowercase : Dict = (RLock(),), tqdm.set_lock
with Pool(_UpperCAmelCase , initargs=_UpperCAmelCase , initializer=_UpperCAmelCase ) as pool:
lowercase : Optional[int] = pool.map(_UpperCAmelCase , _UpperCAmelCase )
logger.info(f'''Finished {num_proc} processes''' )
lowercase : Any = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(_UpperCAmelCase )} objects''' )
return mapped
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=_UpperCAmelCase ):
return joblib.Parallel()(
joblib.delayed(_UpperCAmelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def lowercase__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase : Optional[Any] = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowercase : Tuple = None
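# --- usage sketch (editor's addition) -------------------------------------------
# parallel_backend() reroutes datasets' map-style work through joblib instead of
# a local multiprocessing Pool; "spark" additionally registers the joblib-spark
# backend (assumes the optional `joblibspark` package is installed):
#
#   >>> with parallel_backend("spark"):
#   ...     ds = load_dataset("some/dataset", num_proc=2)  # hypothetical dataset id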
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase: str = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_UpperCamelCase: int = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_UpperCamelCase: Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class Tree:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Return the sum of the values of `node` and all of its descendants."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
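# --- usage sketch (editor's addition) -------------------------------------------
# Iterating a Tree yields the sum of every node value reached by the recursive
# depth-first search:
#
#   >>> root = Node(1)
#   >>> root.left, root.right = Node(2), Node(3)
#   >>> next(iter(Tree(root)))
#   6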
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
def SCREAMING_SNAKE_CASE ( self: List[str] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "sampling_rate" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "padding_value" ) )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :Any = self.feat_extract_tester.prepare_inputs_for_common()
lowercase :List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase :Optional[int] = feat_extract.model_input_names[0]
lowercase :Tuple = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
lowercase :int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
lowercase :int = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
lowercase :List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase :List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
lowercase :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase :Any = feat_extract.model_input_names[0]
lowercase :int = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
lowercase :Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase :Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
lowercase :Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase :int = feat_extract.model_input_names[0]
lowercase :List[str] = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
lowercase :Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase :Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: str=False ):
def _inputs_have_equal_length(_lowerCAmelCase: int ):
lowercase :Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase: int , _lowerCAmelCase: Tuple ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
lowercase :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase :Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
lowercase :Optional[Any] = feat_extract.model_input_names[0]
lowercase :Any = BatchFeature({input_name: speech_inputs} )
lowercase :List[str] = self.feat_extract_tester.seq_length_diff
lowercase :Any = self.feat_extract_tester.max_seq_length + pad_diff
lowercase :Any = self.feat_extract_tester.min_seq_length
lowercase :Dict = self.feat_extract_tester.batch_size
lowercase :Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase :Optional[int] = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
lowercase :int = input_a[input_name]
lowercase :List[Any] = feat_extract.pad(_lowerCAmelCase , padding="longest" )
lowercase :str = input_a[input_name]
lowercase :Union[str, Any] = feat_extract.pad(_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
lowercase :Optional[int] = input_a[input_name]
lowercase :List[Any] = feat_extract.pad(_lowerCAmelCase , padding="longest" , return_tensors="np" )
lowercase :str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="max_length" )[input_name]
lowercase :Any = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=_lowerCAmelCase , return_tensors="np" )
lowercase :int = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase :List[Any] = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=10 )
lowercase :Optional[Any] = input_a[input_name]
lowercase :Any = feat_extract.pad(_lowerCAmelCase , padding="longest" , pad_to_multiple_of=10 )
lowercase :List[str] = input_a[input_name]
lowercase :List[Any] = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase )
lowercase :Tuple = input_a[input_name]
lowercase :Dict = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowerCAmelCase , return_tensors="np" , )
lowercase :Union[str, Any] = input_a[input_name]
self.assertTrue(all(len(_lowerCAmelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase :Optional[Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowercase :Union[str, Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: int=False ):
def _inputs_have_equal_length(_lowerCAmelCase: Union[str, Any] ):
lowercase :str = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCAmelCase ) != length:
return False
return True
def _inputs_are_equal(_lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] ):
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1e-3 ):
return False
return True
lowercase :Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
lowercase :Tuple = feat_extract.model_input_names[0]
lowercase :List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowercase :Tuple = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
lowercase :List[str] = input_a[input_name]
lowercase :Union[str, Any] = feat_extract.pad(_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
lowercase :Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to smallest with np
lowercase :Tuple = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=_lowerCAmelCase , )
lowercase :Optional[Any] = input_a[input_name]
lowercase :int = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
lowercase :Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
# truncate to middle
lowercase :int = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors="np" , )
lowercase :Union[str, Any] = input_a[input_name]
lowercase :List[Any] = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
lowercase :Tuple = input_a[input_name]
lowercase :Tuple = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
lowercase :Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="longest" , truncation=_lowerCAmelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="longest" , truncation=_lowerCAmelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCAmelCase ):
feat_extract.pad(_lowerCAmelCase , padding="max_length" , truncation=_lowerCAmelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase :int = 12
lowercase :Tuple = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
lowercase :Union[str, Any] = input_a[input_name]
lowercase :Any = feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
lowercase :Optional[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase :List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowercase :Dict = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self: int ):
self._check_padding(numpify=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
self._check_padding(numpify=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any ):
self._check_truncation(numpify=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
self._check_truncation(numpify=_lowerCAmelCase )
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
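
# Illustrative sketch (added; not part of the mixin above — `toy_pad` is a
# hypothetical helper, not a transformers API): how right-padding with
# truncation relates sequence lengths to the attention mask that the
# assertions above check.
def toy_pad(sequences, max_length):
    padded = np.zeros((len(sequences), max_length), dtype=np.float32)
    mask = np.zeros((len(sequences), max_length), dtype=np.int32)
    for i, seq in enumerate(sequences):
        length = min(len(seq), max_length)  # truncation, as with `truncation=True`
        padded[i, :length] = seq[:length]
        mask[i, :length] = 1
    # mask.sum(-1) recovers the (possibly truncated) input lengths
    return padded, mask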
| 236
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
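
# Usage sketch (added for illustration; calling it requires the `transformers`
# package context because of the relative imports above): `attribute_map` lets
# generic code read `hidden_size` even though Whisper stores the value under
# `d_model`.
def _demo_attribute_map():
    cfg = WhisperConfig(d_model=256, encoder_layers=2, decoder_layers=2)
    assert cfg.hidden_size == 256  # resolved through PretrainedConfig.attribute_map
    return cfg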
| 236
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 368
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """simple docstring"""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
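    # Worked example (added for illustration): a series circuit with R = 3 ohm
    # and X = 4 ohm has |Z| = sqrt(3**2 + 4**2) = 5 ohm.
    print(electrical_impedance(resistance=3, reactance=4, impedance=0))  # {'impedance': 5.0}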
| 95
| 0
|
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance

The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since.
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
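
# Standalone sketch (added for illustration, independent of the metric class):
# the same computation with the reference mean taken per feature (axis=0),
# which is the textbook form of the squared Mahalanobis distance.
def mahalanobis_sketch(X, reference):
    X = np.asarray(X, dtype=float)
    reference = np.asarray(reference, dtype=float)
    X_minus_mu = X - reference.mean(axis=0)
    inv_cov = np.linalg.pinv(np.cov(reference.T))
    # diagonal of (X - mu) @ inv_cov @ (X - mu).T, computed row-wise
    return np.einsum("ij,jk,ik->i", X_minus_mu, inv_cov, X_minus_mu)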
| 60
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    main()
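
# Illustrative note (added; not called by the script): token ids are dumped as
# uint16 whenever the vocabulary fits in 2**16 entries, which halves the size
# of the pickled arrays relative to int32.
def _dtype_for_vocab(vocab_size):
    return np.uint16 if vocab_size < (1 << 16) else np.int32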
| 191
| 0
|
'''simple docstring'''
UpperCAmelCase_ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 354
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """simple docstring"""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    from doctest import testmod

    testmod()
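    # Quick examples (added for illustration):
    print(snake_to_camel_case("some_random_string"))  # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString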
| 61
| 0
|
'''simple docstring'''
def solution(n: int = 10) -> str:
    """simple docstring"""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
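    # Note (added): the three-argument pow performs modular exponentiation, so
    # only the last n digits of 2**7830457 are ever materialized instead of the
    # full ~2.36-million-digit power.
    print(pow(2, 7_830_457, 10**10))  # the same partial power used above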
| 53
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 53
| 1
|
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 353
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
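    # Worked example (added for illustration): the middle window of
    # [1, 2, 3, 4, 5, 4, 3, 2, 1] is (4, 5, 4), so 5 is returned immediately.
    print(peak([1, 2, 3, 4, 5, 4, 3, 2, 1]))  # 5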
| 84
| 0
|
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
    main()
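    # Sanity check (added for illustration): both implementations agree,
    # e.g. gcd(120, 84) = 12 by either route.
    assert greatest_common_divisor(120, 84) == gcd_by_iterative(120, 84) == 12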
| 300
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    """simple docstring"""

    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 215
| 0
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
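    # Example (added for illustration): only the first character is upper-cased.
    print(capitalize("hello world"))  # Hello world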
| 361
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
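
# Illustrative check (added; not part of the original script): for checkpoint
# version >= 2.0 the helper only permutes the fused QKV axis, so the overall
# shape is preserved. With num_splits=3, num_heads=2, hidden_size=4 a (24, 5)
# tensor stays (24, 5).
def _demo_fix_qkv_ordering():
    demo = torch.arange(24 * 5, dtype=torch.float32).reshape(24, 5)
    out = fix_query_key_value_ordering(demo, checkpoint_version=2.0, num_splits=3, num_heads=2, hidden_size=4)
    assert out.shape == demo.shape
    return out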
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
    main()
####################################################################################################
| 343
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """simple docstring"""
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits, recorded_splits):
    """simple docstring"""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """simple docstring"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
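
# Usage sketch (added for illustration): record the size and checksum of this
# very file; `record_checksum=False` skips hashing when only the size matters.
def _demo_size_checksum():
    info = get_size_checksum_dict(__file__)
    assert set(info) == {"num_bytes", "checksum"}
    return info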
| 138
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 95
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
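
# Note on the 6k +/- 1 loop above (added for illustration): every prime p > 3
# satisfies p % 6 in {1, 5}, so trial division only needs candidates i and
# i + 2 for i = 5, 11, 17, ...
def _primes_mod_six():
    return sorted({p % 6 for p in [5, 7, 11, 13, 17, 19, 23, 29]})  # -> [1, 5]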
class Test(unittest.TestCase):
    """simple docstring"""

    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 292
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : int = logging.get_logger(__name__)
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : str =['''pixel_values''']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = 1 / 2_5_5 , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: Any = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase_: List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='crop_size' )
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: Tuple = do_rescale
UpperCamelCase_: Dict = do_normalize
UpperCamelCase_: Optional[int] = do_center_crop
UpperCamelCase_: Tuple = crop_size
UpperCamelCase_: Optional[int] = size
UpperCamelCase_: Dict = resample
UpperCamelCase_: Tuple = rescale_factor
UpperCamelCase_: Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase_: Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: Optional[Any] = get_size_dict(_lowerCamelCase )
if "shortest_edge" in size:
UpperCamelCase_: str = get_resize_output_image_size(_lowerCamelCase , size=size['shortest_edge'] , default_to_square=_lowerCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCamelCase_: Any = (size['height'], size['width'])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: List[str] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_lowerCamelCase , size=(size['height'], size['width']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase ):
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: str = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_: Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_: Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_: List[str] = get_size_dict(_lowerCamelCase , param_name='crop_size' , default_to_square=_lowerCamelCase )
UpperCamelCase_: Tuple = resample if resample is not None else self.resample
UpperCamelCase_: str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_: Dict = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_: Dict = image_std if image_std is not None else self.image_std
UpperCamelCase_: Any = size if size is not None else self.size
UpperCamelCase_: Optional[Any] = get_size_dict(_lowerCamelCase )
if not is_batched(_lowerCamelCase ):
UpperCamelCase_: Dict = [images]
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: List[str] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
UpperCamelCase_: List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
UpperCamelCase_: List[str] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
UpperCamelCase_: int = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
UpperCamelCase_: Optional[Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
UpperCamelCase_: List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
UpperCamelCase_: Optional[int] = {'pixel_values': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
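# Hedged usage sketch (the processor name below is an assumption; the class above is
# the standard resize -> center-crop -> rescale -> normalize pipeline):
#   processor = SomeImageProcessor(size={"shortest_edge": 2_5_6}, crop_size={"height": 2_2_4, "width": 2_2_4})
#   pixel_values = processor(images, return_tensors="np")["pixel_values"]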
| 292
| 1
|
"""simple docstring"""
def _A (density: float , bulk_modulus: float ) -> float:
    """simple docstring"""
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
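# Hedged usage sketch, not part of the original module. With rough literature values
# for water (bulk modulus ~2.15e9 Pa, density ~998 kg/m^3) the formula above gives
# a speed of sound of roughly 1.47e3 m/s.
def _speed_of_sound_demo() -> None:
    print(f"{_A(998, 2.15e9):.0f} m/s")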
if __name__ == "__main__":
import doctest
doctest.testmod()
| 91
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
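# Hedged shape note (not from the original file): for a (batch, seq_len) input_ids
# array the helper above derives same-shaped 0/1 attention masks by comparing against
# config.pad_token_id, plus all-ones head masks shaped (num_layers, num_heads).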
class FlaxBlenderbotSmallModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase ):
    '''simple docstring'''
    vocab_size = 99
    def _get_config_and_data( self ):
        """simple docstring"""
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        """simple docstring"""
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_shift_tokens_right( self ):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
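# Hedged recap (not from the original tests) of the incremental-decoding pattern the
# cache checks above exercise: model.init_cache is called once with the target length,
# then model.decode is fed one new token per step while past_key_values from each
# output is threaded into the next call.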
| 61
| 0
|
'''simple docstring'''
def perfect(number: int ) -> bool:
    return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
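# Hedged usage sketch, not part of the original script: the perfect numbers
# below 10_000 are 6, 28, 496 and 8128.
def _perfect_demo() -> None:
    print([n for n in range(1, 10_000 ) if perfect(n )] )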
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
__a = int(input('Enter number: ').strip())
    print(f'{__a} is {"" if perfect(__a) else "not "}a Perfect Number.')
| 17
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ) -> None:
        """simple docstring"""
        size = size if size is not None else {"height": 2_0, "width": 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        """simple docstring"""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_expected_patches( self ):
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2_0_4_8
        inputs = image_processor(dummy_image , return_tensors="pt" , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def test_call_pil( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch , header_text=header_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch , header_text=header_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_call_pil( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 17
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace ):
    '''simple docstring'''
    return TrainCommand(args )
class TrainCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser("train" , help="CLI tool to train a model on a task." )
        train_parser.add_argument(
            "--train_data" , type=str , required=True , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
        train_parser.add_argument(
            "--column_label" , type=int , default=0 , help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text" , type=int , default=1 , help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id" , type=int , default=2 , help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data" , type=str , default="" , help="path to validation dataset." )
        train_parser.add_argument(
            "--validation_split" , type=float , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
        train_parser.add_argument("--output" , type=str , default="./" , help="path to saved the trained model." )
        train_parser.add_argument(
            "--task" , type=str , default="text_classification" , help="Task to train the model on." )
        train_parser.add_argument(
            "--model" , type=str , default="bert-base-uncased" , help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size" , type=int , default=32 , help="Batch size for training." )
        train_parser.add_argument("--valid_batch_size" , type=int , default=64 , help="Batch size for validation." )
        train_parser.add_argument("--learning_rate" , type=float , default=3e-5 , help="Learning rate." )
        train_parser.add_argument("--adam_epsilon" , type=float , default=1e-0_8 , help="Epsilon for Adam optimizer." )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args: Namespace ):
        self.logger = logging.get_logger("transformers-cli/training" )
        self.framework = """tf""" if is_tf_available() else """torch"""
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
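# Hedged CLI sketch (paths and flag values are illustrative, not from the original file):
#   transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --output ./trained_model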
| 57
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_stable_diffusion_dpm_flax( self ):
        model_id = """stabilityai/stable-diffusion-2"""
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision="""bf16""" , dtype=jnp.bfloat16 , )
        params["""scheduler"""] = scheduler_params
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
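# Hedged note (not from the original file): the replicate/shard pair above is the
# standard Flax data-parallel setup -- replicate() copies the params pytree to every
# device, while shard() splits the prompt ids and RNG keys along a new leading
# device axis before the jitted pipeline call.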
| 84
| 0
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["input_features", "attention_mask"]
def __init__( self : str , __A : Dict=8_0 , __A : Tuple=1_6_0_0_0 , __A : List[str]=0.0 , __A : Optional[int]=1_0 , __A : int=2_5 , __A : List[str]="hamming_window" , __A : Any=3_2_7_6_8.0 , __A : Union[str, Any]=0.9_7 , __A : Any=1.0 , __A : Union[str, Any]=True , __A : Any=True , __A : Optional[int]=False , **__A : Tuple , ):
super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A )
snake_case__ : int = feature_size
snake_case__ : Optional[int] = sampling_rate
snake_case__ : str = padding_value
snake_case__ : Union[str, Any] = hop_length
snake_case__ : Optional[Any] = win_length
snake_case__ : Dict = frame_signal_scale
snake_case__ : Any = preemphasis_coeff
snake_case__ : Optional[int] = mel_floor
snake_case__ : List[str] = normalize_means
snake_case__ : Union[str, Any] = normalize_vars
snake_case__ : Optional[int] = win_function
snake_case__ : List[str] = return_attention_mask
snake_case__ : Any = win_length * sampling_rate // 1_0_0_0
snake_case__ : int = hop_length * sampling_rate // 1_0_0_0
snake_case__ : Optional[Any] = optimal_fft_length(self.sample_size )
snake_case__ : Union[str, Any] = (self.n_fft // 2) + 1
def _lowercase ( self : List[str] , __A : np.array ):
if self.win_function == "hamming_window":
snake_case__ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__A )
else:
snake_case__ : Optional[Any] = window_function(window_length=self.sample_size , name=self.win_function )
snake_case__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
snake_case__ : int = spectrogram(
one_waveform * self.frame_signal_scale , window=__A , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__A , preemphasis=self.preemphasis_coeff , mel_filters=__A , mel_floor=self.mel_floor , log_mel="log" , )
return msfc_features.T
def _lowercase ( self : Optional[int] , __A : List[str] , __A : List[Any] , __A : List[Any] ):
# make sure we normalize float32 arrays
if self.normalize_means:
snake_case__ : Optional[Any] = x[:input_length].mean(axis=0 )
snake_case__ : str = np.subtract(__A , __A )
if self.normalize_vars:
snake_case__ : Dict = x[:input_length].std(axis=0 )
snake_case__ : Optional[int] = np.divide(__A , __A )
if input_length < x.shape[0]:
snake_case__ : Dict = padding_value
# make sure array is in float32
snake_case__ : Union[str, Any] = x.astype(np.floataa )
return x
def _lowercase ( self : List[str] , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ):
snake_case__ : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__A , __A , self.padding_value ) for x, n in zip(__A , __A )]
def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[bool] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , **__A : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
snake_case__ : int = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
snake_case__ : Optional[Any] = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ : Union[str, Any] = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
snake_case__ : str = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case__ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case__ : Optional[Any] = [raw_speech]
# extract fbank features
snake_case__ : Optional[int] = [self._extract_mfsc_features(__A ) for one_waveform in raw_speech]
# convert into correct format for padding
snake_case__ : List[str] = BatchFeature({"input_features": features} )
snake_case__ : str = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
# make sure list is in array format
snake_case__ : List[str] = padded_inputs.get("input_features" )
if isinstance(input_features[0] , __A ):
snake_case__ : List[Any] = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]
snake_case__ : Any = padded_inputs.get("attention_mask" )
if attention_mask is not None:
snake_case__ : Optional[Any] = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
snake_case__ : List[str] = (
np.array(__A , dtype=np.intaa )
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
snake_case__ : int = self.normalize(
padded_inputs["input_features"] , attention_mask=__A )
if return_tensors is not None:
snake_case__ : List[str] = padded_inputs.convert_to_tensors(__A )
return padded_inputs
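# Hedged usage sketch (names and numbers are assumptions, not from the original
# module): one second of 16 kHz mono audio yields roughly 100 frames of
# `feature_size`-dimensional MFSC features at the default 10 ms hop, e.g.
#   features = feature_extractor(raw_speech, sampling_rate=16_000, return_tensors="np")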
| 353
|
from __future__ import annotations
def simple_interest(principal: float , daily_interest_rate: float , days_between_payments: float ) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal: float , nominal_annual_interest_rate_percentage: float , number_of_compounding_periods: float , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal: float , nominal_annual_percentage_rate: float , number_of_years: float , ) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
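# Hedged usage sketch, not part of the original module:
# 1_000 principal at 0.05% daily for 3_0 days gives 15.0 of simple interest, and
# 1_0_0_0_0 at 5% over 3 periods gives 1576.25 of compound interest.
def _interest_demo() -> None:
    print(simple_interest(1_000, 0.0005, 3_0 ) )
    print(compound_interest(1_0_0_0_0, 0.05, 3 ) )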
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform ):
        """simple docstring"""
        def __init__( self , sentence_delimiter=" ") -> None:
            '''simple docstring'''
            self.sentence_delimiter = sentence_delimiter
        def process_string( self , s):
            '''simple docstring'''
            return list(s)
        def process_list( self , inp):
            '''simple docstring'''
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER( datasets.Metric ):
"""simple docstring"""
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute( self , predictions , references , concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
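# Hedged illustration, not part of the metric module: CER is the character-level
# Levenshtein distance (substitutions + deletions + insertions) divided by the
# reference length N, computed here with a plain dynamic-programming table.
def _cer_by_hand(reference , prediction ):
    r , p = list(reference ) , list(prediction )
    dist = [[0] * (len(p ) + 1 ) for _ in range(len(r ) + 1 )]
    for i in range(len(r ) + 1 ):
        dist[i][0] = i
    for j in range(len(p ) + 1 ):
        dist[0][j] = j
    for i in range(1 , len(r ) + 1 ):
        for j in range(1 , len(p ) + 1 ):
            cost = 0 if r[i - 1] == p[j - 1] else 1
            dist[i][j] = min(dist[i - 1][j] + 1 , dist[i][j - 1] + 1 , dist[i - 1][j - 1] + cost )
    return dist[len(r )][len(p )] / len(r )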
| 99
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = 0
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ )
# save in new folder
model_config.save_pretrained(lowerCamelCase_ )
config.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
# make sure private variable is not incorrectly saved
UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase_ ):
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase_ )
UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)

            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
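

# A minimal sketch (not part of the test suite above) of the registration pattern
# those tests exercise. The CustomConfig/CustomFeatureExtractor classes here are
# illustrative stand-ins defined inline, assuming only the public transformers API.
def _registration_sketch():
    from transformers import AutoConfig, AutoFeatureExtractor, FeatureExtractionMixin, PretrainedConfig

    class CustomConfig(PretrainedConfig):
        model_type = "custom"

    class CustomFeatureExtractor(FeatureExtractionMixin):
        pass

    # Map the "custom" model_type to the config class, then the config class to
    # the feature extractor class.
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    # From here on, any checkpoint whose config.json declares "model_type": "custom"
    # resolves to CustomFeatureExtractor through AutoFeatureExtractor.from_pretrained.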
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
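

# A quick usage sketch (illustrative; assumes the class is re-exported at the top
# level as `transformers.VivitConfig`, as HF configs usually are). Unrecognized
# kwargs are forwarded to PretrainedConfig:
#
#     >>> config = VivitConfig()  # ViViT-B 16x2 Kinetics-400 defaults
#     >>> config.num_frames, config.tubelet_size
#     (32, [2, 16, 16])
#     >>> VivitConfig(num_frames=16, hidden_dropout_prob=0.1).num_frames
#     16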
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            # Submit one check_correctness job per (problem, candidate) pair.
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        # Count, per problem, how many candidates were run and how many passed.
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
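

# The estimator above is the unbiased pass@k from "Evaluating Large Language
# Models Trained on Code": pass@k = E[1 - C(n - c, k) / C(n, k)], where n is the
# number of samples drawn per problem and c the number of samples that pass the
# tests. A quick sanity check on hypothetical counts (3 problems, 5 samples each):
#
#     >>> estimate_pass_at_k(num_samples=[5, 5, 5], num_correct=[0, 2, 5], k=1)
#     array([0. , 0.4, 1. ])
#
# Per problem: 1 - C(5, 1)/C(5, 1) = 0 when nothing passes, 1 - C(3, 1)/C(5, 1) = 0.4
# when 2 of 5 pass, and 1.0 when every sample passes (the n - c < k branch).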