| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string, 82 to 53.2k chars | int64, 0 to 721 | string, 91 to 41.9k chars | int64, 0 to 699 | int64, 0 to 1 |
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were stripped by the obfuscation; the numbered
# names below are hypothetical placeholders. The structure is the standard
# generated dummy-object template (whose two classmethods are conventionally
# from_config and from_pretrained): any use of these classes raises an error
# explaining that the flax and transformers backends must be installed.
class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
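For context, a minimal sketch of how these dummies behave at runtime (illustrative only; `requires_backends` raises an ImportError that names the missing backends):

# Hypothetical usage: without flax and transformers installed, instantiation
# fails immediately with an actionable message instead of a late AttributeError.
try:
    FlaxDummyObject1()
except ImportError as err:
    print(err)  # points the user at installing flax and transformers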
code_codestyle: 598
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the class and argument names below are reconstructed from the
# obfuscated original. The resize rule ((256 / 224) * shortest_edge, BICUBIC
# resampling, ImageNet default mean/std) matches the LeViT image processor in
# transformers, so that naming is used here.
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
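A minimal usage sketch (illustrative; assumes Pillow is installed and uses the reconstructed class name above):

from PIL import Image

# Preprocess one RGB image into a (1, 3, 224, 224) pixel_values batch: resize
# so the shortest edge maps to (256 / 224) * 224 = 256, then center-crop 224x224.
processor = LevitImageProcessor()
image = Image.new("RGB", (640, 480))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)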
style_context_codestyle: 219
label: 0
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
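The payoff of this `_LazyModule` pattern is that importing the package is cheap; heavy submodules load only on first attribute access. A small illustration (hypothetical session):

# Nothing model-related is imported yet; this line is fast.
import transformers.models.pix2struct as pix2struct

# First attribute access triggers the real import of configuration_pix2struct.
config_cls = pix2struct.Pix2StructConfig
print(config_cls.model_type)  # "pix2struct"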
code_codestyle: 721
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
# NOTE: class, method, and variable names below are reconstructed from the
# obfuscated originals; they follow the DPT-hybrid test conventions in transformers.
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
style_context_codestyle: 680
label: 0
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
# NOTE: identifier names below are reconstructed from the obfuscated originals;
# they follow the VAE building blocks in diffusers (Encoder, Decoder,
# VectorQuantizer, DiagonalGaussianDistribution).
@dataclass
class DecoderOutput(BaseOutput):
    # Decoded output sample from the model's final layer.
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample


class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
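A minimal round-trip sketch for the quantizer above (illustrative; the sizes are arbitrary small values):

# Quantize a random latent with an 8-entry, 4-dim codebook. The returned z_q
# uses the straight-through estimator, so gradients flow back to the input.
vq = VectorQuantizer(n_e=8, vq_embed_dim=4, beta=0.25, sane_index_shape=True)
latent = torch.randn(2, 4, 16, 16)  # (batch, channel, height, width); channel == vq_embed_dim
z_q, loss, (_, _, indices) = vq(latent)
print(z_q.shape, indices.shape)  # torch.Size([2, 4, 16, 16]) torch.Size([2, 16, 16])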
code_codestyle: 589
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
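A minimal sketch of driving this parser the way the `accelerate` CLI does (the config path is illustrative):

# Register the "test" subcommand on a root parser, then dispatch to test_command.
root = argparse.ArgumentParser("accelerate")
subcommands = root.add_subparsers()
test_command_parser(subcommands)
args = root.parse_args(["test", "--config_file", "path/to/default_config.yaml"])
args.func(args)  # runs test_script.py via accelerate-launch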
style_context_codestyle: 183
label: 0
"""simple docstring"""
def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ):
"""simple docstring"""
_snake_case : Optional[int] = right or len(a ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a , a , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
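Note that `search` is a recursive double-ended linear scan, not a binary search: each call checks both ends and narrows inward, so the input does not need to be sorted and the cost is O(n). A quick check (illustrative):

# The list is deliberately unsorted; the scan still finds the key.
print(search([13, 7, 42, 5], 42))  # 2
print(search([13, 7, 42, 5], 99))  # -1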
code_codestyle: 87
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit: subreddit to query
    limit: number of posts to fetch
    age: sort order, e.g. "new", "top", "hot"
    wanted_data: fetch only the listed fields for each post
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
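Because the helper raises `requests.HTTPError` on a 429 response, callers may want a small backoff wrapper (illustrative sketch; attempt counts and delays are arbitrary):

import time

def get_subreddit_data_with_retry(subreddit: str, attempts: int = 3, delay: float = 5.0) -> dict:
    # Retry on rate limiting, sleeping a bit longer before each new attempt.
    for attempt in range(attempts):
        try:
            return get_subreddit_data(subreddit, wanted_data=["title", "url"])
        except requests.HTTPError:
            time.sleep(delay * (attempt + 1))
    raise RuntimeError("Still rate limited after retries.")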
style_context_codestyle: 87
label: 1
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a : Any = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
a : List[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
a : Any = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
# NOTE: function and variable names below are reconstructed from the obfuscated
# originals; they follow the wiki_split metric implementation in datasets.
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
code_codestyle: 63
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
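A quick usage sketch showing the `attribute_map` aliases in action (the values are arbitrary):

# hidden_size and num_hidden_layers transparently alias n_embd and n_layer.
config = OpenAIGPTConfig(n_embd=256, n_layer=4)
print(config.hidden_size)        # 256
print(config.num_hidden_layers)  # 4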
style_context_codestyle: 63
label: 1
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Any = (DDIMParallelScheduler,)
_lowercase : str = (('''eta''', 0.0), ('''num_inference_steps''', 5_0))
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowercase )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase , _lowerCAmelCase = 10, 0.0
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for t in scheduler.timesteps:
_lowerCAmelCase = model(_lowercase , _lowercase )
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase ).prev_sample
return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )

    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )

    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )

    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
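    # Background: the DDIM variance being checked follows
    #     sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev),
    # which vanishes at t = 0 and approaches beta_end (0.02) late in a linear schedule,
    # consistent with the reference values asserted above.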
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
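        # Note: batch_step_no_noise is what makes this scheduler "parallel" -- it applies the
        # DDIM update to several (timestep, sample) pairs flattened into one batch instead of
        # stepping a single timestep at a time, which the stacked samples above exercise.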
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 715
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''num_images_per_prompt''',
        '''latents''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        scheduler = DDIMScheduler()
        components = {"""unet""": unet, """scheduler""": scheduler}
        return components
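    # Note: the components are deliberately tiny (32px sample size, two blocks) so the fast
    # tests exercise the full pipeline wiring in seconds on CPU; numerical quality is covered
    # by the slow integration tests against real checkpoints further down.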
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    def test_inference( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def test_save_load_local( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )

    def test_save_load_optional_components( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_cifar10( self ):
        model_id = """google/ddpm-cifar10-32"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom( self ):
        model_id = """google/ddpm-ema-bedroom-256"""
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 162
| 0
|
from __future__ import annotations
sieve = [True] * 1000001
i = 2
while i * i <= 1000000:
    if sieve[i]:
        for j in range(i * i, 1000001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Return True if n is prime, using the precomputed sieve."""
    return sieve[n]

def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in '02468' for digit in str(n))
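# Note: the even-digit filter is a correctness-preserving shortcut. Rotating a multi-digit
# number eventually moves every digit into the ones place, and any number ending in an even
# digit is divisible by 2, so no circular prime other than 2 can contain 0, 2, 4, 6 or 8.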
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below the limit (every digit rotation is prime)."""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result

def count_circular_primes() -> int:
    """Return the number of circular primes below one million."""
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 328
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest( unittest.TestCase ):
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )

    def create_estimator( self , instance_count=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
    def test_glue( self ):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 432
| 0
|
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
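# Note: without memoization the recursion above re-solves the same (row, col) subproblems an
# exponential number of times (each cell fans out into three overlapping calls); the dp_array
# variants below reduce this to O(rows * cols) time.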
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # copy (not alias) so the previous row's values remain readable for the next pass
        next_row = current_row[:]
    return largest_square_area
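# Note: the space-optimized variant keeps only two rows (O(cols) extra memory) because each dp
# cell depends only on its right neighbour in the current row and two cells in the row below.
# Despite the "area" in the function names, the value tracked is the side length of the largest
# square, so the 2x2 all-ones example printed below yields 2.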
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 240
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
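    # Note: the ControlNet conditioning image stays a raw tensor at twice the 32px latent size
    # (64x64, via controlnet_embedder_scale_factor), while the init image is converted to a
    # 64x64 PIL RGB image -- matching the two distinct input paths of the img2img pipeline.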
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""controlnet_conditioning_scale"""] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""controlnet_conditioning_scale"""] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""controlnet_conditioning_scale"""] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""controlnet_conditioning_scale"""] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
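    # Note: control_guidance_start / control_guidance_end restrict the ControlNet's influence
    # to a fraction of the denoising schedule (per-controlnet when lists are passed), so each
    # window chosen above should produce a measurably different image.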
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny( self ):
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = """evil space-punk bird"""
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image ).max() < 9E-2
| 240
| 1
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
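# Note: config_common_kwargs intentionally sets every common PretrainedConfig argument to a
# non-default value; test_config_common_kwargs_is_complete below relies on this to detect when
# a new common argument is added to PretrainedConfig without this table being updated.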
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase ):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-config')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config')
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('test-config' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig')
        self.assertEqual(new_config.attribute , 42)
class ConfigTestUtils(unittest.TestCase ):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}')
        self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F' {", ".join(keys_with_defaults)}.')
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')

        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , 'config.4.0.0.json') , 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir , 'config.4.0.0.json') , os.path.join(tmp_dir , 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size , 768)
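    # Note: `configuration_files` lists versioned candidates such as config.4.0.0.json; at load
    # time the newest file whose version tag does not exceed the installed `transformers`
    # version is picked, which is exactly what the two halves of the test above verify.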
    def test_repo_versioning_before(self):
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size , 768)
| 8
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass

    def load_image(_ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , processor ):
        dqa_pipeline = pipeline(
            'document-question-answering' , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image ),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test( self , dqa_pipeline , examples ):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt( self ):
        dqa_pipeline = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
        image = INVOICE_URL
        question = 'How many cats are there?'
        expected_output = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 3_8, 'end': 3_9},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 3_8, 'end': 4_0},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt( self ):
        dqa_pipeline = pipeline(
            'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
                {'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
            ] , )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
                {'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
            ] , )
        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
                    {'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
                ],
            ]
            * 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : int ) -> Optional[Any]:
_lowerCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : int ) -> List[Any]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def _snake_case ( self : Dict ) -> int:
_lowerCamelCase = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _snake_case ( self : Optional[Any] ) -> Any:
pass
| 544
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig( PretrainedConfig ):
    model_type = 'albert'

    def __init__( self , vocab_size=3_00_00 , embedding_size=1_28 , hidden_size=40_96 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_63_84 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
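# Note: the dynamic_axis mapping marks which ONNX input dimensions remain symbolic (batch and
# sequence length, plus the choice axis for multiple-choice), so a single exported graph can
# serve arbitrary batch sizes and sequence lengths.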
| 714
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig( PretrainedConfig ):
    model_type = """imagegpt"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=5_12 + 1 , n_positions=32 * 32 , n_embd=5_12 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ] )

    def generate_dummy_inputs( self , preprocessor: "FeatureExtractionMixin" , batch_size: int = 1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 32 , image_height: int = 32 , ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
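# Note: ImageGPT models pixels as tokens: the vocabulary is 512 colour clusters plus one
# start-of-sequence token (hence vocab_size=512 + 1), and n_positions=32 * 32 covers one
# flattened 32x32 image, which is why the ONNX config generates dummy image inputs, not text.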
| 261
| 0
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray] ) -> List[int]:
    """Return the static shape where known, falling back to dynamic dimensions."""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )

    dynamic = tf.shape(tensor )

    if tensor.shape == tf.TensorShape(None ):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax(logits: tf.Tensor , axis: Optional[int] = None , name: Optional[str] = None ) -> tf.Tensor:
    """Softmax with a tiny shift that leaves the result numerically unchanged."""
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
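# Note: adding 1e-9 leaves the softmax output unchanged up to float precision; as in the
# upstream TF utilities this mirrors, the shift works around an XLA-on-CPU issue where
# tf.nn.softmax could be compiled unreliably -- treat the exact constant as a convention.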
def functional_layernorm(inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    """A simplified functional layer norm, mirroring torch.nn.functional.layer_norm."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def _a ( lowercase__ : List[Any] , lowercase__ : Dict=0 , lowercase__ : List[Any]=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
SCREAMING_SNAKE_CASE__ : str = tf.shape(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowercase__ , lowercase__ )
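# Sketch (editorial): flattening dims 1..2 of a (2, 3, 4, 5) tensor yields shape (2, 12, 5),
# the TF analogue of torch.flatten(t, start_dim=1, end_dim=2).
def _demo_flatten():
    x = tf.zeros((2, 3, 4, 5))
    in_shape = tf.shape(x)
    flat_dim = tf.math.reduce_prod(in_shape[1:3])
    return tf.reshape(x, tf.concat([in_shape[:1], [flat_dim], in_shape[3:]], axis=0))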
def _a ( lowercase__ : tf.Tensor ):
'''simple docstring'''
if not isinstance(lowercase__ , tf.Tensor ):
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(lowercase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
SCREAMING_SNAKE_CASE__ : str = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
SCREAMING_SNAKE_CASE__ : Any = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
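# Sketch (editorial): the additive-mask convention above maps 1 -> 0.0 (attend) and
# 0 -> dtype.min (a very large negative value later added to the attention logits).
def _demo_additive_mask():
    mask = tf.constant([[1.0, 1.0, 0.0]])  # (batch, kv_len)
    extended = mask[:, None, None, :]  # (batch, 1, 1, kv_len)
    return (tf.cast(1, extended.dtype) - extended) * extended.dtype.min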
def _a ( lowercase__ : tf.Tensor , lowercase__ : int , lowercase__ : str = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
lowercase__ , tf.cast(lowercase__ , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase__ )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
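# Sketch (editorial): the guard above fails fast on out-of-range token ids instead of
# letting an embedding lookup silently gather garbage.
def _demo_bounds_check():
    ids = tf.constant([[0, 5, 9]])
    tf.debugging.assert_less(ids, tf.cast(10, ids.dtype))  # passes: all ids < vocab size 10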
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
SCREAMING_SNAKE_CASE__ : str = [x for x in data if len(lowercase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
SCREAMING_SNAKE_CASE__ : Any = np.asarray(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array_split(lowercase__ , lowercase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array_split(lowercase__ , lowercase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = chunk_data
else:
SCREAMING_SNAKE_CASE__ : Tuple = data
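# Sketch (editorial; assumes h5py is installed -- it is not imported by this module):
# writing a long list of names as the chunked HDF5 attributes that the saver above
# produces and the loader below reads back.
def _demo_hdf5_attrs(path="demo.h5"):
    import h5py  # illustration only
    names = np.asarray([b"layer_%d" % i for i in range(10)])
    with h5py.File(path, "w") as f:
        group = f.create_group("model_weights")
        for chunk_id, chunk in enumerate(np.array_split(names, 2)):
            group.attrs["weight_names%d" % chunk_id] = chunk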
def _a ( lowercase__ : List[Any] , lowercase__ : int ):
'''simple docstring'''
if name in group.attrs:
SCREAMING_SNAKE_CASE__ : Dict = [n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs[name]]
else:
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
def _expand_single_ad_tensor(lowercase__ : Optional[int] ):
if isinstance(lowercase__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowercase__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowercase__ )
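# Sketch (editorial): rank-1 tensors anywhere in a nested structure gain a trailing axis;
# everything else passes through untouched.
def _demo_expand_1d():
    nested = {"labels": tf.constant([1.0, 2.0]), "ids": tf.constant([[1], [2]])}
    return tf.nest.map_structure(
        lambda t: tf.expand_dims(t, axis=-1) if isinstance(t, tf.Tensor) and t.shape.rank == 1 else t,
        nested,
    )  # labels -> shape (2, 1); ids unchanged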
| 85
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowerCAmelCase ( ) -> str:
"""simple docstring"""
A = HfArgumentParser(UpperCamelCase__ )
A = parser.parse_args_into_dataclasses()[0]
A = TensorFlowBenchmark(args=UpperCamelCase__ )
try:
A = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
A = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
A = """ """.join(str(UpperCamelCase__ ).split(""" """ )[:-1] )
A = """"""
A = eval(str(UpperCamelCase__ ).split(""" """ )[-1] )
A = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
A = full_error_msg + begin_error_msg + str(UpperCamelCase__ )
raise ValueError(UpperCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
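# Illustrative invocation (editorial; flag names follow TensorFlowBenchmarkArguments and
# are best-effort assumptions, not fixed by this file):
# python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128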
| 641
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowerCAmelCase__ : int = logging.getLogger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
snake_case__ = "sequence-classification"
def __init__( self : Optional[int] ,lowerCamelCase__ : str ):
if type(UpperCamelCase__ ) == dict:
UpperCAmelCase__ = Namespace(**UpperCamelCase__ )
UpperCAmelCase__ = glue_output_modes[hparams.task]
UpperCAmelCase__ = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ ,UpperCamelCase__ ,self.mode )
def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Optional[Any] ):
return self.model(**UpperCamelCase__ )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase__ = self(**UpperCamelCase__ )
UpperCAmelCase__ = outputs[0]
UpperCAmelCase__ = self.trainer.lr_schedulers[0]['''scheduler''']
UpperCAmelCase__ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.hparams
UpperCAmelCase__ = processors[args.task]()
UpperCAmelCase__ = processor.get_labels()
for mode in ["train", "dev"]:
UpperCAmelCase__ = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' ,UpperCamelCase__ )
else:
logger.info('Creating features from dataset file at %s' ,args.data_dir )
UpperCAmelCase__ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
UpperCAmelCase__ = convert_examples_to_features(
UpperCamelCase__ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info('Saving features into cached file %s' ,UpperCamelCase__ )
torch.save(UpperCamelCase__ ,UpperCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] = False ):
UpperCAmelCase__ = '''dev''' if mode == '''test''' else mode
UpperCAmelCase__ = self._feature_file(UpperCamelCase__ )
logger.info('Loading features from cached file %s' ,UpperCamelCase__ )
UpperCAmelCase__ = torch.load(UpperCamelCase__ )
UpperCAmelCase__ = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
UpperCAmelCase__ = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
UpperCAmelCase__ = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) ,batch_size=UpperCamelCase__ ,shuffle=UpperCamelCase__ ,)
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ):
UpperCAmelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
UpperCAmelCase__ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
UpperCAmelCase__ = self(**UpperCamelCase__ )
UpperCAmelCase__ = outputs[:2]
UpperCAmelCase__ = logits.detach().cpu().numpy()
UpperCAmelCase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
UpperCAmelCase__ = np.concatenate([x['pred'] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
UpperCAmelCase__ = np.argmax(UpperCamelCase__ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
UpperCAmelCase__ = np.squeeze(UpperCamelCase__ )
UpperCAmelCase__ = np.concatenate([x['target'] for x in outputs] ,axis=0 )
UpperCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCAmelCase__ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task ,UpperCamelCase__ ,UpperCamelCase__ )}
UpperCAmelCase__ = dict(results.items() )
UpperCAmelCase__ = results
return ret, preds_list, out_label_list
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = self._eval_end(UpperCamelCase__ )
UpperCAmelCase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ):
UpperCAmelCase__ = self._eval_end(UpperCamelCase__ )
UpperCAmelCase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ):
BaseTransformer.add_model_specific_args(UpperCamelCase__ ,UpperCamelCase__ )
parser.add_argument(
'--max_seq_length' ,default=128 ,type=UpperCamelCase__ ,help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) ,)
parser.add_argument(
'--task' ,default='' ,type=UpperCamelCase__ ,required=UpperCamelCase__ ,help='The GLUE task to run' ,)
parser.add_argument(
'--gpus' ,default=0 ,type=UpperCamelCase__ ,help='The number of GPUs allocated for this, it is by default 0 meaning none' ,)
parser.add_argument(
'--overwrite_cache' ,action='store_true' ,help='Overwrite the cached training and evaluation sets' )
return parser
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
add_generic_args(__UpperCamelCase , os.getcwd() )
UpperCAmelCase__ = GLUETransformer.add_model_specific_args(__UpperCamelCase , os.getcwd() )
UpperCAmelCase__ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
UpperCAmelCase__ = os.path.join(
'./results' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
UpperCAmelCase__ = GLUETransformer(__UpperCamelCase )
UpperCAmelCase__ = generic_train(__UpperCamelCase , __UpperCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
UpperCAmelCase__ = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=__UpperCamelCase ) )
UpperCAmelCase__ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCamelCase )
if __name__ == "__main__":
main()
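# Illustrative invocation (editorial; --task, --gpus, --data_dir, --output_dir and
# --do_predict appear above; --model_name_or_path is assumed to come from add_generic_args):
# python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#     --model_name_or_path bert-base-cased --output_dir ./results --gpus 1 --do_predict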
| 711
|
"""simple docstring"""
import random
class Onepad:
    """simple docstring"""

    @staticmethod
    def encrypt(text: str):
        # each character code i is encoded as (i + k) * k for a fresh random key k
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            cipher.append((i + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        # invert encrypt: (c - k**2) / k == ((i + k) * k - k**2) / k == i
        plain = []
        for i in range(len(key)):
            plain.append(chr(int((cipher[i] - key[i] ** 2) / key[i])))
        return "".join(plain)

if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
| 632
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__a)
class _UpperCAmelCase ( __a):
def __init__( self , **_A ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a_ )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(a_ )
def __call__( self , _A , _A = None , **_A , ) -> str:
'''simple docstring'''
if "text_queries" in kwargs:
_UpperCAmelCase : List[Any] = kwargs.pop("""text_queries""" )
if isinstance(a_ , (str, Image.Image) ):
_UpperCAmelCase : int = {'image': image, 'candidate_labels': candidate_labels}
else:
_UpperCAmelCase : int = image
_UpperCAmelCase : List[Any] = super().__call__(a_ , **a_ )
return results
def __snake_case ( self , **_A ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = {}
if "threshold" in kwargs:
_UpperCAmelCase : Tuple = kwargs['threshold']
if "top_k" in kwargs:
_UpperCAmelCase : int = kwargs['top_k']
return {}, {}, postprocess_params
def __snake_case ( self , _A ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = load_image(inputs["""image"""] )
_UpperCAmelCase : Dict = inputs['candidate_labels']
if isinstance(a_ , a_ ):
_UpperCAmelCase : Any = candidate_labels.split(""",""" )
_UpperCAmelCase : List[str] = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(a_ ):
_UpperCAmelCase : Optional[Any] = self.tokenizer(a_ , return_tensors=self.framework )
_UpperCAmelCase : int = self.image_processor(a_ , return_tensors=self.framework )
yield {
"is_last": i == len(a_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __snake_case ( self , _A ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = model_inputs.pop("""target_size""" )
_UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_label""" )
_UpperCAmelCase : List[str] = model_inputs.pop("""is_last""" )
_UpperCAmelCase : Union[str, Any] = self.model(**a_ )
_UpperCAmelCase : Any = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def __snake_case ( self , _A , _A=0.1 , _A=None ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = []
for model_output in model_outputs:
_UpperCAmelCase : Any = model_output['candidate_label']
_UpperCAmelCase : Dict = BaseModelOutput(a_ )
_UpperCAmelCase : Optional[Any] = self.image_processor.post_process_object_detection(
outputs=a_ , threshold=a_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
_UpperCAmelCase : Optional[Any] = outputs['scores'][index].item()
_UpperCAmelCase : Tuple = self._get_bounding_box(outputs["""boxes"""][index][0] )
_UpperCAmelCase : str = {'score': score, 'label': label, 'box': box}
results.append(a_ )
_UpperCAmelCase : Optional[Any] = sorted(a_ , key=lambda _A : _A["score"] , reverse=a_ )
if top_k:
_UpperCAmelCase : str = results[:top_k]
return results
def __snake_case ( self , _A ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
_UpperCAmelCase : Union[str, Any] = box.int().tolist()
_UpperCAmelCase : Any = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
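# Illustrative usage (editorial; the checkpoint name is an example, not fixed by this file):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("street.png", candidate_labels=["car", "bicycle", "person"])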
| 238
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = '▁'
a = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
a = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
a = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
a = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class a_ ( snake_case ):
UpperCAmelCase : int = VOCAB_FILES_NAMES
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Tuple = ["""input_ids""", """attention_mask"""]
UpperCAmelCase : List[int] = []
UpperCAmelCase : List[int] = []
def __init__( self : int , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[Any]=None , a_ : str=None , a_ : Tuple="<s>" , a_ : int="</s>" , a_ : int="</s>" , a_ : Any="<pad>" , a_ : Union[str, Any]="<unk>" , a_ : Optional[Any]="m2m100" , a_ : Optional[Dict[str, Any]] = None , a_ : Dict=8 , **a_ : Union[str, Any] , ) -> None:
snake_case: Optional[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
snake_case: Union[str, Any] =language_codes
snake_case: Optional[int] =FAIRSEQ_LANGUAGE_CODES[language_codes]
snake_case: Dict ={lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
snake_case: Optional[int] =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a_ )
for lang_code in fairseq_language_code
if self.get_lang_token(a_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a_ , tgt_lang=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , unk_token=a_ , pad_token=a_ , language_codes=a_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a_ , **a_ , )
snake_case: int =vocab_file
snake_case: int =load_json(a_ )
snake_case: Tuple ={v: k for k, v in self.encoder.items()}
snake_case: Any =spm_file
snake_case: int =load_spm(a_ , self.sp_model_kwargs )
snake_case: List[Any] =len(self.encoder )
snake_case: Optional[Any] ={
self.get_lang_token(a_ ): self.encoder_size + i for i, lang_code in enumerate(a_ )
}
snake_case: List[str] ={lang_code: self.encoder_size + i for i, lang_code in enumerate(a_ )}
snake_case: int ={v: k for k, v in self.lang_token_to_id.items()}
snake_case: Any =src_lang if src_lang is not None else 'en'
snake_case: Optional[int] =tgt_lang
snake_case: Optional[int] =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
snake_case: str =num_madeup_words
@property
def UpperCamelCase ( self : int ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self : List[Any] , a_ : str ) -> None:
snake_case: Optional[Any] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self : Optional[int] , a_ : str ) -> List[str]:
return self.sp_model.encode(a_ , out_type=a_ )
def UpperCamelCase ( self : List[Any] , a_ : Optional[int] ) -> Union[str, Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a_ , self.encoder[self.unk_token] )
def UpperCamelCase ( self : str , a_ : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a_ , self.unk_token )
def UpperCamelCase ( self : int , a_ : Optional[Any] ) -> Tuple:
snake_case: List[Any] =[]
snake_case: List[str] =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
snake_case: Optional[Any] =[]
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def UpperCamelCase ( self : Dict , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
snake_case: List[Any] =[1] * len(self.prefix_tokens )
snake_case: Optional[int] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def UpperCamelCase ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self : Any ) -> Dict:
snake_case: List[str] ={self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Dict:
snake_case: Optional[int] =self.__dict__.copy()
snake_case: List[Any] =None
return state
def __setstate__( self : int , a_ : Dict ) -> None:
snake_case: Any =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Optional[int] ={}
snake_case: List[str] =load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase ( self : str , a_ : str , a_ : Optional[str] = None ) -> Tuple[str]:
snake_case: Tuple =Path(a_ )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
snake_case: Union[str, Any] =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
snake_case: List[Any] =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , a_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(a_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a_ )
elif not os.path.isfile(self.spm_file ):
with open(a_ , 'wb' ) as fi:
snake_case: Any =self.sp_model.serialized_model_proto()
fi.write(a_ )
return (str(a_ ), str(a_ ))
def UpperCamelCase ( self : Optional[Any] , a_ : List[str] , a_ : str = "en" , a_ : Optional[List[str]] = None , a_ : str = "ro" , **a_ : List[str] , ) -> BatchEncoding:
snake_case: List[str] =src_lang
snake_case: Optional[int] =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seq2seq_batch(a_ , a_ , **a_ )
def UpperCamelCase ( self : Union[str, Any] , a_ : int , a_ : Optional[str] , a_ : Optional[str] , **a_ : Union[str, Any] ) -> Any:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
snake_case: List[str] =src_lang
snake_case: List[str] =self(a_ , add_special_tokens=a_ , **a_ )
snake_case: List[Any] =self.get_lang_id(a_ )
snake_case: List[str] =tgt_lang_id
return inputs
def UpperCamelCase ( self : str ) -> List[str]:
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self : Dict ) -> Tuple:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self : List[str] , a_ : str ) -> None:
snake_case: Optional[Any] =self.get_lang_token(a_ )
snake_case: Optional[Any] =self.lang_token_to_id[lang_token]
snake_case: Optional[Any] =[self.cur_lang_id]
snake_case: Optional[int] =[self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , a_ : str ) -> None:
snake_case: Dict =self.get_lang_token(a_ )
snake_case: Tuple =self.lang_token_to_id[lang_token]
snake_case: Optional[int] =[self.cur_lang_id]
snake_case: List[str] =[self.eos_token_id]
def UpperCamelCase ( self : Union[str, Any] , a_ : str ) -> str:
return self.lang_code_to_token[lang]
def UpperCamelCase ( self : int , a_ : str ) -> int:
snake_case: Union[str, Any] =self.get_lang_token(a_ )
return self.lang_token_to_id[lang_token]
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
snake_case: Dict =sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def a_ ( __UpperCAmelCase ) -> Union[Dict, List]:
"""simple docstring"""
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> None:
"""simple docstring"""
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
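# Illustrative usage (editorial; mirrors the public M2M100 tokenizer API):
# tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tok("Hello world", return_tensors="pt")  # source ids are prefixed with __en__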
| 350
| 0
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
def lowercase ( __A : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = torch.load(__A , map_location="""cpu""" )
if "model" in sd.keys():
snake_case : Any = sd["""model"""]
# pop unnecessary weights
snake_case : Optional[Any] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__A )
snake_case : List[Any] = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case : int = sd.pop(__A )
snake_case : Optional[int] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case : List[str] = sd[key]
# We split QKV in separate Q,K,V
snake_case : Dict = key.replace(""".qkv_proj.""" , """.q_proj.""" )
snake_case : Any = key.replace(""".qkv_proj.""" , """.k_proj.""" )
snake_case : List[str] = key.replace(""".qkv_proj.""" , """.v_proj.""" )
snake_case : List[Any] = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` has its QKV weight stored in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case , snake_case , snake_case : str = torch.split(__A , depth // 3 , dim=0 )
snake_case : Tuple = q
snake_case : List[Any] = k
snake_case : List[Any] = v
del sd[key]
return sd
@torch.no_grad()
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str]=None ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = load_checkpoint(__A )
if config is not None:
snake_case : List[Any] = OPTConfig.from_pretrained(__A )
else:
snake_case : Any = OPTConfig()
snake_case : Union[str, Any] = OPTModel(__A ).half().eval()
model.load_state_dict(__A )
# Check results
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowercase : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
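# Example invocation (editorial; paths are placeholders):
# python convert_opt_checkpoint.py --fairseq_path ./opt-125m/checkpoint.pt \
#     --pytorch_dump_folder_path ./opt-125m-hf --hf_config facebook/opt-125m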
| 315
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[int] = logging.get_logger(__name__)
def lowercase ( __A : str ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[int] = torch.load(__A , map_location="""cpu""" )
if "model" in sd.keys():
snake_case : Any = sd["""model"""]
# pop unnecessary weights
snake_case : Optional[Any] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__A )
snake_case : List[Any] = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case : int = sd.pop(__A )
snake_case : Optional[int] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case : List[str] = sd[key]
# We split QKV in separate Q,K,V
snake_case : Dict = key.replace(""".qkv_proj.""" , """.q_proj.""" )
snake_case : Any = key.replace(""".qkv_proj.""" , """.k_proj.""" )
snake_case : List[str] = key.replace(""".qkv_proj.""" , """.v_proj.""" )
snake_case : List[Any] = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` has its QKV weight stored in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case , snake_case , snake_case : str = torch.split(__A , depth // 3 , dim=0 )
snake_case : Tuple = q
snake_case : List[Any] = k
snake_case : List[Any] = v
del sd[key]
return sd
@torch.no_grad()
def lowercase ( __A : Optional[Any] , __A : Tuple , __A : List[str]=None ) -> Optional[int]:
'''simple docstring'''
snake_case : Any = load_checkpoint(__A )
if config is not None:
snake_case : List[Any] = OPTConfig.from_pretrained(__A )
else:
snake_case : Any = OPTConfig()
snake_case : Union[str, Any] = OPTModel(__A ).half().eval()
model.load_state_dict(__A )
# Check results
Path(__A ).mkdir(exist_ok=__A )
model.save_pretrained(__A )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowercase : Any = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 315
| 1
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__A = logging.get_logger(__name__)
def _A ( lowercase__ , lowercase__ , lowercase__ ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _A ( lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = to_pil_image(lowercase__ )
lowercase__ , lowercase__ = pil_image.size
lowercase__ = pytesseract.image_to_data(lowercase__ , lang=lowercase__ , output_type="""dict""" , config=lowercase__ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
lowercase__ = [idx for idx, word in enumerate(lowercase__ ) if not word.strip()]
lowercase__ = [word for idx, word in enumerate(lowercase__ ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(lowercase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase__ = []
for x, y, w, h in zip(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = [x, y, x + w, y + h]
actual_boxes.append(lowercase__ )
# finally, normalize the bounding boxes
lowercase__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase__ , lowercase__ , lowercase__ ) )
assert len(lowercase__ ) == len(lowercase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
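# Quick numeric check (editorial) of the 0-1000 box normalization used above:
def _demo_normalize_box():
    # a box from (10, 20) to (110, 70) in a 200x100 image maps to [50, 200, 550, 700]
    box, width, height = [10, 20, 110, 70], 200, 100
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]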
class A ( __UpperCAmelCase ):
lowerCamelCase : Optional[int] = ["""pixel_values"""]
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = "" , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
lowercase__ = size if size is not None else {"""height""": 224, """width""": 224}
lowercase__ = get_size_dict(lowerCamelCase__ )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_value
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase__ = apply_ocr
lowercase__ = ocr_lang
lowercase__ = tesseract_config
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
lowercase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase__ = (size["""height"""], size["""width"""])
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(lowerCamelCase__ )
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase__ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
lowercase__ = []
lowercase__ = []
for image in images:
lowercase__ , lowercase__ = apply_tesseract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
lowercase__ = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
lowercase__ = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
lowercase__ = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowerCamelCase__ )
if apply_ocr:
lowercase__ = words_batch
lowercase__ = boxes_batch
return data
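# Illustrative usage (editorial; class and checkpoint names are examples):
# processor = LayoutLMv2ImageProcessor(apply_ocr=True)
# encoding = processor(image, return_tensors="pt")
# encoding holds "pixel_values", plus "words" and "boxes" when OCR runs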
| 325
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=32 , lowerCamelCase__=3 , lowerCamelCase__=10 , lowerCamelCase__=[10, 20, 30, 40] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ) -> List[str]:
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(lowerCamelCase__ )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Any:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowerCamelCase : Any = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = False
lowerCamelCase : Union[str, Any] = False
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModelTester(self )
lowercase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A ( ):
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Dict:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase__ )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowercase__ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 325
| 1
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
SCREAMING_SNAKE_CASE : int = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"jukebox": 512,
}
class lowerCamelCase( __lowerCAmelCase ):
lowercase_ : int = VOCAB_FILES_NAMES
lowercase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Tuple = PRETRAINED_LYRIC_TOKENS_SIZES
lowercase_ : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=["v3", "v2", "v2"], lowerCamelCase=5_12, lowerCamelCase=5, lowerCamelCase="<|endoftext|>", **lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = AddedToken(lowerCAmelCase_, lstrip=lowerCAmelCase_, rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_, lowerCAmelCase_) else unk_token
super().__init__(
unk_token=lowerCAmelCase_, n_genres=lowerCAmelCase_, version=lowerCAmelCase_, max_n_lyric_tokens=lowerCAmelCase_, **lowerCAmelCase_, )
_lowercase : Any = version
_lowercase : Any = max_n_lyric_tokens
_lowercase : Any = n_genres
with open(lowerCAmelCase_, encoding='utf-8') as vocab_handle:
_lowercase : Tuple = json.load(lowerCAmelCase_)
with open(lowerCAmelCase_, encoding='utf-8') as vocab_handle:
_lowercase : str = json.load(lowerCAmelCase_)
with open(lowerCAmelCase_, encoding='utf-8') as vocab_handle:
_lowercase : List[str] = json.load(lowerCAmelCase_)
_lowercase : Optional[Any] = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2 the character vocabulary had n_vocab=80; v3 dropped the '+' character, so n_vocab=79.
if len(self.lyrics_encoder) == 79:
_lowercase : Optional[Any] = oov.replace(R'\-\'', R'\-+\'')
_lowercase : List[str] = regex.compile(lowerCAmelCase_)
_lowercase : Optional[int] = {v: k for k, v in self.artists_encoder.items()}
_lowercase : List[Any] = {v: k for k, v in self.genres_encoder.items()}
_lowercase : Union[str, Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Dict = [self.artists_encoder.get(lowerCAmelCase_, 0) for artist in list_artists]
for genres in range(len(lowerCAmelCase_)):
_lowercase : Any = [self.genres_encoder.get(lowerCAmelCase_, 0) for genre in list_genres[genres]]
_lowercase : List[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
_lowercase : Union[str, Any] = [[self.lyrics_encoder.get(lowerCAmelCase_, 0) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
return list(lowerCAmelCase_)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase , _lowercase : Union[str, Any] = self.prepare_for_tokenization(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        """Lowercases or normalizes the artist and genre names and strips the lyrics down to the
        characters the lyric vocabulary knows about."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text: str) -> str:
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input for the artists and genres vocabularies: keeps [a-zA-Z0-9.]
        (lowercased) and collapses every other run of characters into a single '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        """Converts the inner content to tensors of the framework named by `tensor_type`."""
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Converts the raw artist, genre and lyric strings to a BatchEncoding with one
        sequence of input ids per prior in `self.version`."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's artists, genres and lyrics vocabularies to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Converts indices back to their artist, genre and lyric-character tokens."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
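# Usage sketch (illustrative, not part of the original module). The class and
# checkpoint names below are assumptions based on the Jukebox tokenizer
# published on the Hub; the call returns a BatchEncoding with one id tensor
# per prior in self.version:
#
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   encoding = tokenizer(artist="Alan Jackson", genres="Country Rock", lyrics="old town road")
#   encoding["input_ids"]  # list of len(self.version) tensors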
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
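# Running these tests (sketch): from a transformers checkout, something like
#   python -m pytest tests/models/deberta_v2/test_modeling_deberta_v2.py
# exercises the common ModelTesterMixin suite above; the slow integration test
# only runs with RUN_SLOW=1. The file path is an assumption based on the usual
# transformers test layout.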
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates train and validation DataLoaders for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
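# Launch sketch (an assumption mirroring the usual accelerate test harness;
# the config file name and flag values below are illustrative only):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 1 --performance_lower_bound 0.8
# With a DeepSpeed config that provides its own optimizer/scheduler, the
# DummyOptim/DummyScheduler branches above are taken instead of AdamW and the
# linear warmup schedule.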
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
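# Note (illustrative): with the `_LazyModule` indirection above, a statement like
#   from transformers.models.mra import MraConfig
# resolves immediately, while `MraModel` and the other torch-backed symbols are
# only imported from `modeling_mra` on first attribute access, so merely
# importing the package does not pull in PyTorch.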
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
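# Worked example (sketch): 585 is palindromic in both bases, since
# str(585) == "585" and bin(585) == "0b1001001001", and "1001001001"
# reversed is itself, so 585 contributes to the total.
if __name__ == "__main__":
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])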
"""simple docstring"""
lowercase_ = 8.31_4462 # Unit - J mol-1 K-1
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
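# Worked example (sketch): one mole at 300 K in a one-cubic-metre vessel, and
# the classic molar volume at STP (273.15 K, 101325 Pa), roughly 22.4 litres.
if __name__ == "__main__":
    print(pressure_of_gas_system(1.0, 300.0, 1.0))  # ~2494.34 Pa
    print(volume_of_gas_system(1.0, 273.15, 101_325.0))  # ~0.0224 m^3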
""" Data2VecText configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
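# Usage sketch (illustrative): the ONNX config derives its dynamic axes from
# the task it is built for; the task string below is an assumption.
#
#   config = Data2VecTextConfig()
#   onnx_config = Data2VecTextOnnxConfig(config, task="sequence-classification")
#   onnx_config.inputs  # {"input_ids": {0: "batch", 1: "sequence"}, ...}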
'''Convert a string to ASCII uppercase without using str.upper().'''


def upper(word: str) -> str:
    """Shift lowercase ASCII letters to uppercase by subtracting 32 from their code points."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
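# Quick check (sketch): only ASCII a-z are shifted; everything else passes through.
if __name__ == "__main__":
    print(upper("wow"))  # WOW
    print(upper("Hello World!"))  # HELLO WORLD!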
'''Find the mode(s) of a list of values.'''
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return a sorted list of the most frequently occurring value(s) in `input_list`."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
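# Quick check (sketch): ties are all returned, sorted.
if __name__ == "__main__":
    print(mode([2, 2, 3]))  # [2]
    print(mode([1, 2, 2, 3, 3]))  # [2, 3]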
""" Testing suite for the PyTorch Mask2Former model. """

import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
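# Usage sketch (illustrative): this packaged builder is what backs a call like
#   load_dataset("pandas", data_files={"train": "data.pkl"})
# i.e. each pickled DataFrame is read with pd.read_pickle and converted to an
# Arrow table, with an optional cast to user-supplied features.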
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from math import sqrt


def is_prime(number: int) -> bool:
    """Determines whether the given number is prime or not."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the `nth` prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
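# Quick check (sketch): the 6th prime is 13 (2, 3, 5, 7, 11, 13); for the
# default nth=10_001 the known Project Euler answer is 104_743.
if __name__ == "__main__":
    assert solution(6) == 13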
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
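# Composition sketch (illustrative): the top-level config nests the two
# sub-configs and mirrors a few of their fields.
#
#   text = Pix2StructTextConfig(num_layers=2)
#   vision = Pix2StructVisionConfig(num_hidden_layers=2)
#   config = Pix2StructConfig.from_text_vision_configs(text, vision)
#   config.decoder_start_token_id  # taken from the text sub-config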
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
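# Usage sketch: two islands under 8-connectivity -- the pair in the top-left
# corner and the lone cell in the bottom-right corner.
if __name__ == "__main__":
    g = Graph(3, 3, [[1, 1, 0], [0, 0, 0], [0, 0, 1]])
    print(g.count_islands())  # 2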
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_UpperCAmelCase : Optional[int] = re.compile(R"""\s+""")
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(UpperCamelCase__ , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = [len(UpperCamelCase__ ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(UpperCamelCase__ ), "line_max": max(UpperCamelCase__ )}
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    '''simple docstring'''
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    '''simple docstring'''
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    '''simple docstring'''
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
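# Added distillation sketch (not part of the original script): the exact-dedup
# step above in miniature — hash whitespace-normalized content, keep the first
# occurrence of each digest.
def _exact_dedup_demo(texts):
    seen, kept = set(), []
    for text in texts:
        digest = hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()
        if digest not in seen:
            seen.add(digest)
            kept.append(text)
    return kept


assert _exact_dedup_demo(["a = 1", "a  =  1", "b = 2"]) == ["a = 1", "b = 2"]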
| 108
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                '`split_batches=False`, `dispatch_batches=False`**',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ',
                        f'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**',
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
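# Added minimal sketch (not part of the original test file) of the
# `accelerator.accumulate` pattern exercised above: gradients are only
# synchronized and applied every `gradient_accumulation_steps` batches.
def _accumulate_demo():
    from torch.utils.data import TensorDataset

    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = torch.nn.Linear(1, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x = torch.randn(32, 1)
    dataloader = DataLoader(TensorDataset(x, 3 * x), batch_size=8)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        with accelerator.accumulate(model):  # no-op sync on odd steps
            loss = F.mse_loss(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()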
| 108
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        """simple docstring"""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f'{self.env.base_job_name}-single',
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')
    def test_glue(self):
        """simple docstring"""
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 115
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 645
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    print(F"""{description}:""")
    print(F"""- Time: {measures["time"]:.2f}s""")
    for i in range(torch.cuda.device_count()):
        print(F"""- GPU {i} allocated: {measures[str(i)]:.2f}MiB""")
        peak = measures[F"""{i}-peak"""]
        print(F"""- GPU {i} peak: {peak:.2f}MiB""")
    print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""")
    print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""")
| 188
|
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
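# Added counterpart sketch (not in the original file): a one-shot server the
# client above can talk to — it accepts one connection and streams a file back
# in 1 KiB chunks. The filename is an arbitrary placeholder.
def serve_file(filename="File_to_send", port=12312):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as f:
        while chunk := f.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()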
| 188
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """simple docstring"""
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        """simple docstring"""
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        """simple docstring"""
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
        """simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None, weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """simple docstring"""
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """simple docstring"""
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        """simple docstring"""
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')
    def _decay_weights_op(self, var, learning_rate, apply_state):
        """simple docstring"""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        """simple docstring"""
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """simple docstring"""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        """simple docstring"""
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """simple docstring"""
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        """simple docstring"""
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        """simple docstring"""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """simple docstring"""
    def __init__(self):
        """simple docstring"""
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        """simple docstring"""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """simple docstring"""
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """simple docstring"""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f'Expected {len(self._gradients)} gradients, but got {len(gradients)}')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        """simple docstring"""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
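# Added usage sketch (not part of the original module): a linear-decay schedule
# with 10% warmup via `create_optimizer`; the step counts are arbitrary.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=1000,
        num_warmup_steps=100,
        weight_decay_rate=0.01,
    )
    print(float(lr_schedule(50)))   # mid-warmup: roughly (50/100) * 5e-5
    print(float(lr_schedule(550)))  # on the polynomial-decay segment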
| 638
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """simple docstring"""
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """simple docstring"""
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """simple docstring"""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """simple docstring"""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    '''simple docstring'''
    for i in range(len(test_array)):
        for j in range(i, len(test_array)):
            min_range = reduce(min, test_array[i : j + 1])
            max_range = reduce(max, test_array[i : j + 1])
            sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
            assert min_range == min_segment_tree.query(i, j)
            assert max_range == max_segment_tree.query(i, j)
            assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
    test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
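# Added quick example (not part of the original file): inclusive range-min
# queries in O(log n) after the O(n) build, with a point update in between.
demo_tree = SegmentTree([5, 2, 8, 1, 9], min)
assert demo_tree.query(1, 3) == 1   # min over [2, 8, 1]
demo_tree.update(3, 7)
assert demo_tree.query(1, 3) == 2   # min over [2, 8, 7]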
| 638
| 1
|
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 700
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCAmelCase : Tuple = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
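# Added usage sketch (not part of the original module): validate one pinned
# dependency on demand; the package name must be a key of `deps`.
if __name__ == "__main__":
    dep_version_check("numpy")  # raises if the installed numpy violates its pin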
| 474
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
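# Added construction sketch (not part of the original config module): a scaled-
# down config for quick tests, relying on the PretrainedConfig machinery above.
if __name__ == "__main__":
    tiny = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(tiny.model_type, tiny.hidden_size)  # megatron-bert 128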
| 103
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """simple docstring"""
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def perceiver_tokenizer(self):
        """simple docstring"""
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        """simple docstring"""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """simple docstring"""
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
    def test_prepare_batch_integration(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        """simple docstring"""
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """simple docstring"""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )
    def test_decode_invalid_byte_id(self):
        """simple docstring"""
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
pass
    def test_convert_tokens_to_string_format(self):
        """simple docstring"""
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
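    # Added illustration (not part of the original tests): the byte-level scheme
    # the assertions above rely on — requires `transformers` plus network access
    # for the pretrained files.
    @staticmethod
    def _byte_level_demo():
        tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
        ids = tok("Unicode €.")["input_ids"]
        print(ids)              # UTF-8 bytes shifted past the special tokens
        print(tok.decode(ids))  # '[CLS]Unicode €.[SEP]'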
| 103
| 1
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.')

    @classmethod
    def pip_install(cls):
        return f'`pip install {cls.pip_package or cls.name}`'
class OptunaBackend(HyperParamSearchBackendBase):
    name = 'optuna'

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = 'ray'
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = 'sigopt'

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = 'wandb'

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
_lowercase : Optional[int] ={
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __UpperCAmelCase ( ) -> List[str]:
snake_case__ : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_SCREAMING_SNAKE_CASE ) > 0:
snake_case__ : int = available_backends[0].name
if len(_SCREAMING_SNAKE_CASE ) > 1:
logger.info(
F'''{len(_SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
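
# Hedged usage sketch (not part of the source module): how the registry above
# would typically be queried. Assumes at least one of the backends imported at
# the top of this module is installed, and a `trainer` object exists.
#
#   backend_name = default_hp_search_backend()        # e.g. "optuna"
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()                        # raises with a pip hint if missing
#   best_run = backend.run(trainer, n_trials=10, direction="minimize")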
| 707
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
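
# Hedged usage sketch (not part of the source file): building model inputs for a
# sentence pair with the fast tokenizer above. Assumes the "albert-base-v2"
# checkpoint is reachable on the Hugging Face Hub or cached locally.
#
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids_a = tok.convert_tokens_to_ids(tok.tokenize("first sentence"))
#   ids_b = tok.convert_tokens_to_ids(tok.tokenize("second one"))
#   inputs = tok.build_inputs_with_special_tokens(ids_a, ids_b)    # [CLS] a [SEP] b [SEP]
#   segments = tok.create_token_type_ids_from_sequences(ids_a, ids_b)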
| 574
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # This tester pins its own values rather than using the arguments above.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
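
# Hedged usage sketch (not part of the test file): running the slow integration
# check above by hand. Assumes TensorFlow and the "YituTech/conv-bert-base"
# checkpoint are available locally or via the Hugging Face Hub.
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   out = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   print(out.shape)   # expected: (1, 6, 768)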
| 631
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
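
# Hedged usage sketch (not part of the test file): the alignment helper exercised
# above, called directly. Only one of out_features / out_indices needs to be
# given; the other is derived from the backbone's stage names.
#
#   feats, idxs = get_aligned_output_features_output_indices(None, [0, 2], ["a", "b", "c"])
#   # feats == ["a", "c"], idxs == [0, 2]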
| 631
| 1
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet and Transformer-XL more context for short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
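
# Hedged usage sketch (not part of the source module): this pipeline is normally
# constructed through `transformers.pipeline`. Assumes a causal LM checkpoint
# such as "gpt2" is available.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])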
| 707
|
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    # All inputs must be positive, non-zero numbers.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    # Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
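
# Hedged worked example (not part of the source module): hydrogen vs. oxygen.
# With M(H2) ~ 2.016 g/mol and M(O2) ~ 31.998 g/mol, Graham's law gives
# rate(H2) / rate(O2) = sqrt(31.998 / 2.016) ~ 3.98, i.e. hydrogen effuses
# roughly four times faster than oxygen.
#
#   print(effusion_ratio(2.016, 31.998))   # ~ 3.98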
| 401
| 0
|
from __future__ import annotations
class Node:
    """A binary tree node holding an integer and optional children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    # A tree is "full" when every node has either zero or two children.
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
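
# Hedged usage note (not part of the source module): a three-node tree where the
# root has both children is full, while removing one child makes it non-full.
#
#   t = Node(10); t.left = Node(20); t.right = Node(30)
#   print(is_full_binary_tree(t))   # True
#   t.right = None
#   print(is_full_binary_tree(t))   # False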
| 411
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 411
| 1
|
"""simple docstring"""
import math
import sys
def lowerCamelCase_ ( _lowerCamelCase : Dict ):
lowerCamelCase_ = ""
try:
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as binary_file:
lowerCamelCase_ = binary_file.read()
for dat in data:
lowerCamelCase_ = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _lowerCamelCase : List[Any] ):
lowerCamelCase_ = {"0": "0", "1": "1"}
lowerCamelCase_ = "", ""
lowerCamelCase_ = len(__SCREAMING_SNAKE_CASE )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCamelCase_ = lexicon[curr_string]
result += last_match_id
lowerCamelCase_ = last_match_id + "0"
if math.loga(__SCREAMING_SNAKE_CASE ).is_integer():
lowerCamelCase_ = {}
for curr_key in list(__SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = lexicon.pop(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = new_lex
lowerCamelCase_ = last_match_id + "1"
index += 1
lowerCamelCase_ = ""
return result
def lowerCamelCase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] ):
lowerCamelCase_ = 8
try:
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as opened_file:
lowerCamelCase_ = [
to_write[i : i + byte_length]
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _lowerCamelCase : List[Any] ):
lowerCamelCase_ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowerCamelCase_ = data_bits[counter:]
lowerCamelCase_ = data_bits[counter + 1 :]
return data_bits
def lowerCamelCase_ ( _lowerCamelCase : str , _lowerCamelCase : Any ):
lowerCamelCase_ = read_file_binary(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = remove_prefix(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = decompress_data(__SCREAMING_SNAKE_CASE )
write_file_binary(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
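

# Usage sketch (an illustration, not part of the original script): the
# byte-packing helpers are easy to sanity-check in isolation with a throwaway
# temp file. For byte-aligned input the stop marker is appended and then
# skipped on write, so a write/read round trip returns the bits unchanged.
def _roundtrip_example() -> None:
    import os
    import tempfile

    bits = "1010110011110000"  # two full bytes
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "packed.bin")
        write_file_binary(path, bits)
        assert read_file_binary(path) == bits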
"""simple docstring"""
import argparse
import os
import re
__lowercase : Optional[int] = """src/diffusers"""
# Pattern that looks at the indentation in a line.
__lowercase : Dict = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowercase : int = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowercase : Optional[Any] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowercase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowercase : Any = re.compile(r"""\[([^\]]+)\]""")
def lowerCamelCase_ ( _lowerCamelCase : List[str] ):
lowerCamelCase_ = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def lowerCamelCase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str]="" , _lowerCamelCase : Dict=None , _lowerCamelCase : int=None ):
lowerCamelCase_ = 0
lowerCamelCase_ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
lowerCamelCase_ = ['''\n'''.join(lines[:index] )]
else:
lowerCamelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
lowerCamelCase_ = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ = []
else:
blocks.append('''\n'''.join(_lowerCamelCase ) )
lowerCamelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append('''\n'''.join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def lowerCamelCase_ ( _lowerCamelCase : int ):
def _inner(_lowerCamelCase : List[Any] ):
return key(_lowerCamelCase ).lower().replace('''_''' , '''''' )
return _inner
def lowerCamelCase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=None ):
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : Union[str, Any] ):
return x
if key is None:
lowerCamelCase_ = noop
# Constants are all uppercase, they go first.
lowerCamelCase_ = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase_ = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase_ = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
lowerCamelCase_ = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase : Any ):
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : List[Any] ):
lowerCamelCase_ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowerCamelCase_ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] ) + "]"
lowerCamelCase_ = import_statement.split('''\n''' )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase_ = 2 if lines[1].strip() == '''[''' else 1
lowerCamelCase_ = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCamelCase_ = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
lowerCamelCase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCamelCase_ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ = keys[:-1]
lowerCamelCase_ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase_ = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
def lowerCamelCase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=True ):
with open(_lowerCamelCase , '''r''' ) as f:
lowerCamelCase_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase_ = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase_ = main_blocks[block_idx]
lowerCamelCase_ = block.split('''\n''' )
# Get to the start of the imports.
lowerCamelCase_ = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase_ = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase_ = '''\n'''.join(block_lines[line_idx:-1] )
lowerCamelCase_ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCamelCase_ = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase_ = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase_ = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase_ = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
lowerCamelCase_ = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase_ = 0
lowerCamelCase_ = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCamelCase_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase_ = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(_lowerCamelCase , '''w''' ) as f:
f.write('''\n'''.join(_lowerCamelCase ) )
def lowerCamelCase_ ( _lowerCamelCase : Tuple=True ):
lowerCamelCase_ = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
lowerCamelCase_ = sort_imports(os.path.join(_lowerCamelCase , '''__init__.py''' ) , check_only=_lowerCamelCase )
if result:
lowerCamelCase_ = [os.path.join(_lowerCamelCase , '''__init__.py''' )]
if len(_lowerCamelCase ) > 0:
raise ValueError(F"""Would overwrite {len(_lowerCamelCase )} files, run `make style`.""" )
if __name__ == "__main__":
__lowercase : Any = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
__lowercase : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
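

# Usage sketch (an illustration, not part of the original script): running
# `sort_objects_in_import` on a single-line entry. The module and object names
# below are hypothetical; note that constants sort before classes.
def _sort_example() -> None:
    statement = '_import_structure["models.albert"] = ["AlbertConfig", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"]'
    print(sort_objects_in_import(statement))
    # -> _import_structure["models.albert"] = ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"]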
"""simple docstring"""
import numpy
# List of input, output pairs
_SCREAMING_SNAKE_CASE : Tuple = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_SCREAMING_SNAKE_CASE : Dict = (((515, 22, 13), 555), ((61, 35, 49), 150))
_SCREAMING_SNAKE_CASE : Tuple = [2, 4, 1, 5]
_SCREAMING_SNAKE_CASE : Optional[int] = len(train_data)
_SCREAMING_SNAKE_CASE : Tuple = 0.009
def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict="train" ) -> Tuple:
return calculate_hypothesis_value(_lowerCamelCase , _lowerCamelCase ) - output(
_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
lowerCamelCase_ = 0
for i in range(len(_lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Any ) -> Union[str, Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCamelCase__ ( _lowerCamelCase : Dict , _lowerCamelCase : Any ) -> Optional[int]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCamelCase__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=m ) -> Union[str, Any]:
lowerCamelCase_ = 0
for i in range(_lowerCamelCase ):
if index == -1:
summation_value += _error(_lowerCamelCase )
else:
summation_value += _error(_lowerCamelCase ) * train_data[i][0][index]
return summation_value
def lowerCamelCase__ ( _lowerCamelCase : Optional[int] ) -> Optional[int]:
lowerCamelCase_ = summation_of_cost_derivative(_lowerCamelCase , _lowerCamelCase ) / m
return cost_derivative_value
def lowerCamelCase__ ( ) -> Tuple:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase_ = 0.00_00_02
lowerCamelCase_ = 0
lowerCamelCase_ = 0
while True:
j += 1
lowerCamelCase_ = [0, 0, 0, 0]
for i in range(0 , len(_lowerCamelCase ) ):
lowerCamelCase_ = get_cost_derivative(i - 1 )
lowerCamelCase_ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase , rtol=_lowerCamelCase , ):
break
lowerCamelCase_ = temp_parameter_vector
print(('Number of iterations:', j) )
def lowerCamelCase__ ( ) -> Dict:
for i in range(len(_lowerCamelCase ) ):
print(('Actual output value:', output(_lowerCamelCase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(_lowerCamelCase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
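

# Worked example (an illustration, not part of the original script): with the
# initial parameter_vector [2, 4, 1, 5], the hypothesis for the first training
# input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39 against a target of 15, so the
# first error is 24 before any training.
def _hypothesis_example() -> None:
    assert _hypothesis_value((5, 2, 3)) == 39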
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"


def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
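

# Usage sketch (an illustration, not part of the original script): calling the
# conversion function directly instead of via the CLI. The checkpoint path is
# hypothetical and must point into a fairseq dump dir that also holds the
# dicts and bpecodes files.
def _fsmt_conversion_example() -> None:
    convert_fsmt_checkpoint_to_pytorch(
        fsmt_checkpoint_path="checkpoints/wmt19.ru-en/model4.pt",
        pytorch_dump_folder_path="dumped/wmt19-ru-en",
    )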
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCAmelCase ( ) -> List[Any]:
__lowerCamelCase : List[str] = 9
__lowerCamelCase : int = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCamelCase : Dict = kruskal(A__ , A__ )
__lowerCamelCase : List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(A__ ) == sorted(A__ )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : str = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : Union[str, Any] = 'openai-gpt'
__a : List[Any] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __a=40478 , __a=512 , __a=768 , __a=12 , __a=12 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=1E-5 , __a=0.02 , __a="cls_index" , __a=True , __a=None , __a=True , __a=0.1 , **__a , ):
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Union[str, Any] = n_positions
__lowerCamelCase : Optional[Any] = n_embd
__lowerCamelCase : int = n_layer
__lowerCamelCase : List[str] = n_head
__lowerCamelCase : Dict = afn
__lowerCamelCase : Optional[Any] = resid_pdrop
__lowerCamelCase : int = embd_pdrop
__lowerCamelCase : int = attn_pdrop
__lowerCamelCase : Dict = layer_norm_epsilon
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Any = summary_type
__lowerCamelCase : Tuple = summary_use_proj
__lowerCamelCase : List[Any] = summary_activation
__lowerCamelCase : str = summary_first_dropout
__lowerCamelCase : Tuple = summary_proj_to_labels
super().__init__(**__a )
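

# Usage sketch (an illustration, not part of the original file): `attribute_map`
# lets the generic transformers names resolve to the GPT-1-specific ones, so the
# aliased attributes return the same values.
def _config_example() -> None:
    config = OpenAIGPTConfig()
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 12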
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`. Refer to its docstring for more information."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`. Refer to its docstring for more information."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
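

# Usage sketch (an illustration, not part of the original file): the checkpoint
# name is an assumption; any FLAVA checkpoint that ships both a tokenizer and
# an image processor should behave the same way.
def _processor_example() -> None:
    from PIL import Image

    processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="a photo", return_tensors="pt")
    print(sorted(inputs.keys()))  # tokenizer outputs merged with image features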
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""

import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


# We will verify the converted model on this image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
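

# Usage sketch (an illustration, not part of the original script): equivalent
# to the CLI above. The checkpoint path is hypothetical and must point at the
# TF .ckpt prefix downloaded from tensorflow/models.
def _mobilenet_conversion_example() -> None:
    convert_movilevit_checkpoint(
        model_name="mobilenet_v1_1.0_224",
        checkpoint_path="checkpoints/mobilenet_v1_1.0_224.ckpt",
        pytorch_dump_folder_path="dumped/mobilenet_v1_1.0_224",
        push_to_hub=False,
    )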
"""Convert a Reformer checkpoint trained with trax to PyTorch."""

import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
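

# Usage sketch (an illustration, not part of the original script): equivalent
# to the CLI above; all three paths are hypothetical.
def _reformer_conversion_example() -> None:
    convert_trax_checkpoint_to_pytorch(
        trax_model_pkl_path="checkpoints/reformer_crime_and_punishment.pkl",
        config_file="configs/reformer-crime-and-punishment.json",
        pytorch_dump_path="dumped/reformer.bin",
    )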
import tempfile
import unittest

import numpy as np

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
"""simple docstring"""
from __future__ import annotations
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> None:
UpperCamelCase = data
UpperCamelCase = None
UpperCamelCase = None
def __snake_case ( _lowercase ): # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def __snake_case ( _lowercase ):
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def __snake_case ( _lowercase ):
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def __snake_case ( ): # Main function for testing.
"""simple docstring"""
UpperCamelCase = Node(1 )
UpperCamelCase = Node(2 )
UpperCamelCase = Node(3 )
UpperCamelCase = Node(4 )
UpperCamelCase = Node(5 )
UpperCamelCase = Node(6 )
UpperCamelCase = Node(7 )
UpperCamelCase = Node(8 )
UpperCamelCase = Node(9 )
print(is_full_binary_tree(_lowercase ) )
print(depth_of_tree(_lowercase ) )
print('''Tree is: ''' )
display(_lowercase )
if __name__ == "__main__":
main()
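

# Example (an illustration, not part of the original file): a node with only
# one child makes the tree non-full, while the depth counts the longest path.
def _partial_tree_example() -> None:
    root = Node(1)
    root.left = Node(2)  # right child missing -> not a full binary tree
    assert is_full_binary_tree(root) is False
    assert depth_of_tree(root) == 2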
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
snake_case_ : Any = """."""
if __name__ == "__main__":
snake_case_ : List[str] = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
snake_case_ : Any = []
snake_case_ : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
snake_case_ : List[Any] = line.strip()
snake_case_ : List[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
snake_case_ : Union[str, Any] = """\n""".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
"""Convert DeiT distilled checkpoints from the timm library."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the converted model on this image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
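

# Usage sketch (an illustration, not part of the original script): equivalent
# to the CLI above; the dump folder is hypothetical.
def _deit_conversion_example() -> None:
    convert_deit_checkpoint(
        deit_name="vit_deit_base_distilled_patch16_224",
        pytorch_dump_folder_path="dumped/deit-base-distilled-patch16-224",
    )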
"""Tests for the consistency-models CMStochasticIterativeScheduler."""
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**UpperCamelCase )
__lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
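# --- Hedged illustration (not part of the original tests) ---
# A minimal sketch of the behaviour the last three tests assert, assuming the
# diffusers CMStochasticIterativeScheduler API exercised above:
#
#   from diffusers import CMStochasticIterativeScheduler
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201)
#   scheduler.set_timesteps(timesteps=[106, 0])       # descending order: fine
#   # scheduler.set_timesteps(timesteps=[0, 106])     # raises: not descending
#   # scheduler.set_timesteps(num_inference_steps=2,  # raises: both arguments
#   #                         timesteps=[106, 0])     #   cannot be passed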
| 39
| 0
|
def A__ ( snake_case_ : int ):
SCREAMING_SNAKE_CASE__: Optional[Any]= int(snake_case_ )
if n_element < 1:
SCREAMING_SNAKE_CASE__: Union[str, Any]= ValueError('''a should be a positive number''' )
raise my_error
SCREAMING_SNAKE_CASE__: Any= [1]
    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = (0, 0, 0)
SCREAMING_SNAKE_CASE__: Optional[int]= 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowercase_ : List[str] = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
lowercase_ : List[str] = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
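# --- Hedged example (not part of the original script) ---
# A self-contained reference for the three-pointer merge above; `nxt` and
# `_hamming_reference` are illustrative names. The first ten Hamming numbers
# (all of the form 2^i * 3^j * 5^k) are checked at the end.
def _hamming_reference(n: int) -> list:
    out, i, j, k = [1], 0, 0, 0
    while len(out) < n:
        nxt = min(out[i] * 2, out[j] * 3, out[k] * 5)  # smallest candidate
        out.append(nxt)
        i += out[i] * 2 == nxt  # advance every pointer that produced nxt,
        j += out[j] * 3 == nxt  # which also skips duplicates
        k += out[k] * 5 == nxt
    return out

assert _hamming_reference(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]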
| 64
|
'''simple docstring'''
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Tuple = len(SCREAMING_SNAKE_CASE__ )
__a : int = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # a sum of zero (0) can always be formed by taking no elements,
    # hence True/1
for i in range(arr_len + 1 ):
__a : int = True
    # a non-zero sum cannot be formed from an empty set, hence False
for i in range(1 , required_sum + 1 ):
__a : Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__a : List[str] = subset[i - 1][j]
if arr[i - 1] <= j:
__a : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
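# --- Hedged example (not part of the original module) ---
# A self-contained reachable-sums reference for the DP above;
# `_subset_sum_reference` is an illustrative name.
def _subset_sum_reference(arr, target):
    reachable = {0}  # sums formable from a prefix of arr
    for x in arr:
        reachable |= {s + x for s in reachable}
    return target in reachable

assert _subset_sum_reference([3, 34, 4, 12, 5, 2], 9) is True   # 4 + 5
assert _subset_sum_reference([3, 34, 4, 12, 5, 2], 30) is False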
| 597
| 0
|
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
snake_case_ = get_logger(__name__)
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict=0 ) -> Optional[int]:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ : List[Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if accelerator.process_index == 0:
logger.info(F"Saving model to {output_model_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving model to {output_model_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE_ , F"{MODEL_NAME}_{model_index}" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving model to {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"Model saved to {ckpt_dir}" )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Any:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(SCREAMING_SNAKE_CASE_ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
SCREAMING_SNAKE_CASE_ : int = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading model from {input_model_file}" )
SCREAMING_SNAKE_CASE_ : int = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ : List[Any] = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
SCREAMING_SNAKE_CASE_ : str = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading model from {input_model_file}" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ : Dict = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"{MODEL_NAME}_{model_index}" )
if F"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading model from {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=SCREAMING_SNAKE_CASE_ , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE_ : Dict = state_dict["model"]
logger.info(F"Model loaded from {ckpt_dir}" )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Tuple:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ : List[Any] = FSDP.optim_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
SCREAMING_SNAKE_CASE_ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Optimizer state saved in {output_optimizer_file}" )
else:
SCREAMING_SNAKE_CASE_ : Any = os.path.join(SCREAMING_SNAKE_CASE_ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
logger.info(F"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(SCREAMING_SNAKE_CASE_ ) , planner=DefaultSavePlanner() , )
logger.info(F"Optimizer state saved in {ckpt_dir}" )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 ) -> Tuple:
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
SCREAMING_SNAKE_CASE_ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ : int = None
            # The check below should work but currently does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ : Tuple = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ )
logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
else:
SCREAMING_SNAKE_CASE_ : Tuple = (
os.path.join(SCREAMING_SNAKE_CASE_ , F"{OPTIMIZER_NAME}_{optimizer_index}" )
if F"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading Optimizer from {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ : Dict = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(SCREAMING_SNAKE_CASE_ ) , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = optim_state["optimizer"]
logger.info(F"Optimizer loaded from {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ : int = FSDP.optim_state_dict_to_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
optimizer.load_state_dict(SCREAMING_SNAKE_CASE_ )
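# --- Hedged usage sketch (not part of the original module) ---
# The four helpers above mirror accelerate's fsdp_utils save/load functions;
# the upstream names are used below, since the definitions here carry
# placeholder names. Assuming an Accelerator configured with an FSDPPlugin,
# a checkpoint round trip looks roughly like:
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, input_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)
#
# The file layout ({MODEL_NAME}.bin, per-rank .bin files, or a sharded
# checkpoint directory) is selected by fsdp_plugin.state_dict_type.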
| 711
|
'''simple docstring'''
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [1]
for i in range(2 , SCREAMING_SNAKE_CASE_ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Dict = list(range(SCREAMING_SNAKE_CASE_ ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ : Any = factorials.pop()
        SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = divmod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
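# --- Hedged example (not part of the original module) ---
# The helper decodes k in the factorial number system, yielding the k-th
# (0-indexed) lexicographic permutation of range(n). A quick cross-check
# against itertools:
from itertools import permutations

assert list(permutations(range(4)))[10] == (1, 3, 0, 2)  # matches the helper's [1, 3, 0, 2]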
| 68
| 0
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCamelCase_ : int = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCamelCase_ : List[str] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, """rb""") as fp:
UpperCamelCase_ : Any = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
UpperCamelCase_ : int = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCamelCase_ : Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
UpperCamelCase_ : Dict = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 331
|
from __future__ import annotations
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_UpperCAmelCase)
if n > 1:
factors.append(_UpperCAmelCase)
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
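# --- Hedged example (not part of the original module) ---
# A self-contained copy of the trial-division loop above, under the
# illustrative name `_prime_factors_reference`; factors come out with
# multiplicity, in non-decreasing order.
def _prime_factors_reference(n: int) -> list:
    i, out = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            out.append(i)
    if n > 1:
        out.append(n)  # leftover n is itself prime
    return out

assert _prime_factors_reference(360) == [2, 2, 2, 3, 3, 5]
assert _prime_factors_reference(97) == [97]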
| 73
| 0
|
'''simple docstring'''
def UpperCAmelCase ( lowercase__ : int = 1000000 ):
'''simple docstring'''
a__ = set(range(3 , lowercase__ , 2 ) )
primes.add(2 )
for p in range(3 , lowercase__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowercase__ , lowercase__ ) ) )
a__ = [float(lowercase__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase__ , limit + 1 , lowercase__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
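# --- Hedged check (not part of the original solution) ---
# This is Project Euler 72: the number of reduced proper fractions n/d with
# 2 <= d <= limit equals sum(phi(d) for d in 2..limit), which the sieve above
# accumulates. Small hand check for limit = 8:
#
#   phi(2..8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21
#
# so the same code with limit=8 should return 21 (up to float rounding).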
| 705
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : int =16
_lowercase : int =32
def UpperCAmelCase ( lowercase__ : Accelerator , lowercase__ : int = 16 , lowercase__ : str = "bert-base-cased" ):
'''simple docstring'''
a__ = AutoTokenizer.from_pretrained(lowercase__ )
a__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
a__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a__ = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
a__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
a__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ):
'''simple docstring'''
a__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ = config["""lr"""]
a__ = int(config["""num_epochs"""] )
a__ = int(config["""seed"""] )
a__ = int(config["""batch_size"""] )
a__ = args.model_name_or_path
set_seed(lowercase__ )
a__ , a__ = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
a__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
a__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
a__ = 1
a__ = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
a__ = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
a__ = 0
# We also need to keep track of the stating epoch so files are named properly
a__ = 0
# Now we train the model
a__ = evaluate.load("""glue""" , """mrpc""" )
a__ = 0
a__ = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
a__ = model(**lowercase__ )
a__ = outputs.loss
a__ = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a__ = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a__ = model(**lowercase__ )
a__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
a__ , a__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
a__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
a__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowercase__ )
a__ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
a__ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def UpperCAmelCase ( ):
'''simple docstring'''
a__ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase__ , default=lowercase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=3 , help="""Number of train epochs.""" , )
a__ = parser.parse_args()
a__ = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
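# --- Hedged usage sketch (not part of the original script) ---
# This mirrors accelerate's DeepSpeed performance test script; assuming it is
# saved as test_performance.py, it would be launched through accelerate with a
# DeepSpeed config, roughly:
#
#   accelerate launch --use_deepspeed test_performance.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --performance_lower_bound 0.80
#
# With --performance_lower_bound set, the assert near the end fails the run if
# the best MRPC accuracy stays below the bound; the best metric is written to
# output_dir/all_results.json on the main process either way.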
| 412
| 0
|
"""simple docstring"""
class __A :
def __init__( self : Union[str, Any] ) -> str:
__magic_name__: Any = """"""
__magic_name__: Dict = """"""
__magic_name__: List[Any] = []
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : int , __snake_case : int ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
__magic_name__: Tuple = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
__magic_name__: Any = self.__min_dist_top_down_dp(__snake_case , n - 1 )
__magic_name__: int = self.__min_dist_top_down_dp(m - 1 , __snake_case )
__magic_name__: Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
__magic_name__: Union[str, Any] = 1 + min(__snake_case , __snake_case , __snake_case )
return self.dp[m][n]
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : str , __snake_case : str ) -> int:
__magic_name__: List[str] = worda
__magic_name__: str = worda
__magic_name__: List[Any] = [[-1 for _ in range(len(__snake_case ) )] for _ in range(len(__snake_case ) )]
return self.__min_dist_top_down_dp(len(__snake_case ) - 1 , len(__snake_case ) - 1 )
def lowerCamelCase__ ( self : Dict , __snake_case : str , __snake_case : str ) -> int:
__magic_name__: int = worda
__magic_name__: int = worda
__magic_name__: Dict = len(__snake_case )
__magic_name__: Union[str, Any] = len(__snake_case )
__magic_name__: List[str] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
__magic_name__: Union[str, Any] = j
elif j == 0: # second string is empty
__magic_name__: str = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
__magic_name__: Dict = self.dp[i - 1][j - 1]
else:
__magic_name__: List[Any] = self.dp[i][j - 1]
__magic_name__: List[Any] = self.dp[i - 1][j]
__magic_name__: Optional[Any] = self.dp[i - 1][j - 1]
__magic_name__: List[str] = 1 + min(__snake_case , __snake_case , __snake_case )
return self.dp[m][n]
if __name__ == "__main__":
__lowerCamelCase = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
__lowerCamelCase = input('Enter the first string: ').strip()
__lowerCamelCase = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
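# --- Hedged example (not part of the original module) ---
# Both solvers compute the Levenshtein distance (unit-cost insert, delete,
# replace). A classic check, using the public method names the __main__ block
# assumes:
#
#   solver = EditDistance()
#   solver.min_dist_top_down("kitten", "sitting")   # -> 3
#   solver.min_dist_bottom_up("kitten", "sitting")  # -> 3
#
# (substitute k->s, substitute e->i, append g)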
| 96
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {"tokenizer_file": "tokenizer.json"}
__lowerCamelCase : List[str] = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class a ( UpperCamelCase_ ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = ["""input_ids""", """attention_mask"""]
__lowercase = None
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<unk>" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<pad>" , __UpperCamelCase=False , __UpperCamelCase=False , **__UpperCamelCase , )-> str:
'''simple docstring'''
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , **__UpperCamelCase , )
A__ : str =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __UpperCamelCase ) != add_prefix_space:
A__ : Union[str, Any] =getattr(__UpperCamelCase , pre_tok_state.pop('''type''' ) )
A__ : Optional[Any] =add_prefix_space
A__ : str =pre_tok_class(**__UpperCamelCase )
A__ : Tuple =add_prefix_space
def lowerCAmelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
'''simple docstring'''
A__ : Union[str, Any] =kwargs.get('''is_split_into_words''' , __UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase_ ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
'''simple docstring'''
A__ : int =kwargs.get('''is_split_into_words''' , __UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''' )
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
'''simple docstring'''
A__ : Tuple =self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def lowerCAmelCase_ ( self , __UpperCamelCase )-> List[int]:
'''simple docstring'''
A__ : str =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
A__ : Optional[int] =input_ids[-self.model_max_length :]
return input_ids
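# --- Hedged usage sketch (not part of the original module) ---
# This is the BLOOM fast tokenizer; assuming it is exported as
# BloomTokenizerFast, typical use is:
#
#   from transformers import BloomTokenizerFast
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   enc = tok("Hello world", return_tensors="pt")  # input_ids, attention_mask
#
# Note the guard above: encoding pre-tokenized (is_split_into_words) input
# requires instantiating the tokenizer with add_prefix_space=True.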
| 416
| 0
|
import qiskit
def _A( A_ : int = 2 ) -> dict:
    '''simple docstring'''
    __lowercase = A_  # one classical bit per qubit for the measurement
# Using Aer's simulator
__lowercase = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
__lowercase = qiskit.QuantumCircuit(A_ , A_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , A_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , A_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(A_ ) ) , list(range(A_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
__lowercase = qiskit.execute(A_ , A_ , shots=1000 )
return job.result().get_counts(A_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 710
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : UNetaDModel
UpperCamelCase_ : KarrasVeScheduler
def __init__( self : Tuple , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : KarrasVeScheduler ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : str , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , **lowerCamelCase__ : int , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
__lowercase = self.unet.config.sample_size
__lowercase = (batch_size, 3, img_size, img_size)
__lowercase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__lowercase = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowerCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__lowercase = self.scheduler.schedule[t]
__lowercase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__lowercase , __lowercase = self.scheduler.add_noise_to_input(lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__lowercase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__lowercase = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__lowercase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__lowercase = self.scheduler.step_correct(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , step_output.prev_sample , step_output['''derivative'''] , )
__lowercase = step_output.prev_sample
__lowercase = (sample / 2 + 0.5).clamp(0 , 1 )
__lowercase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowercase = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
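# --- Hedged usage sketch (not part of the original module) ---
# Assuming this mirrors diffusers' KarrasVePipeline, generation looks roughly
# like the following (the checkpoint id is illustrative; any compatible
# UNet2DModel / KarrasVeScheduler pair works):
#
#   import torch
#   from diffusers import KarrasVePipeline
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=50,
#                generator=torch.manual_seed(0)).images[0]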
| 362
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __A ( _A , _A ):
"""simple docstring"""
__a = torch.load(_A , map_location="cpu" )
__a = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
__a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__a = v
else:
__a = v
__a = chkpt["params"]
__a = {n: v for n, v in config.items() if not isinstance(_A , (torch.FloatTensor, numpy.ndarray) )}
__a = chkpt["dico_word2id"]
__a = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
__a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__a = pytorch_dump_folder_path + "/" + CONFIG_NAME
__a = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(_A , _A )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , indent=2 ) + "\n" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(_A , indent=2 ) + "\n" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
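# --- Hedged usage sketch (not part of the original script) ---
# Assuming the script above is saved as
# convert_xlm_original_pytorch_checkpoint_to_pytorch.py, and given an official
# XLM dump (the checkpoint name below is illustrative):
#
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-converted
#
# The script writes pytorch_model.bin, config.json and the vocabulary JSON
# into the dump folder.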
| 197
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __A ( ):
"""simple docstring"""
__a = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=_A , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=_A , default=5 )
parser.add_argument("--batch_size" , type=_A , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=_A , default=1 )
parser.add_argument("--freeze" , type=_A , default=_A )
parser.add_argument("--learning_rate" , type=_A , default=5E-4 )
parser.add_argument("--seed" , type=_A , default=0 )
parser.add_argument("--lr_scheduler_type" , type=_A , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=_A , default=10 )
parser.add_argument("--weight_decay" , type=_A , default=0.01 )
parser.add_argument("--output_dir" , type=_A , default="./results" )
return parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = load("""accuracy""")
def __A ( _A ):
"""simple docstring"""
__a , __a = eval_pred
__a = np.argmax(_A , axis=1 )
return metric.compute(predictions=_A , references=_A )
class A_ ( a_ ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ):
super().__init__()
__a = trainer
def _UpperCAmelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : int ):
if control.should_evaluate:
__a = deepcopy(__SCREAMING_SNAKE_CASE )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def __A ( ):
"""simple docstring"""
__a = get_args()
set_seed(args.seed )
__a = load_dataset("codeparrot/codecomplex" , split="train" )
__a = dataset.train_test_split(test_size=0.2 )
__a = train_test["test"].train_test_split(test_size=0.5 )
__a = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
__a = AutoTokenizer.from_pretrained(args.model_ckpt )
__a = tokenizer.eos_token
__a = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__a = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__a = False
__a = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(_A ):
__a = tokenizer(example["src"] , truncation=_A , max_length=1024 )
        __a = labels.str2int(example["complexity"] )  # ClassLabel.str2int maps the label string to its integer id
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__a = train_test_validation.map(
_A , batched=_A , remove_columns=train_test_validation["train"].column_names , )
__a = DataCollatorWithPadding(tokenizer=_A )
__a = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
__a = Trainer(
model=_A , args=_A , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_A , data_collator=_A , compute_metrics=_A , )
print("Training..." )
trainer.add_callback(CustomCallback(_A ) )
trainer.train()
if __name__ == "__main__":
main()
| 197
| 1
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCAmelCase_ : Union[str, Any] = _symbol_database.Default()
UpperCAmelCase_ : List[Any] = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
UpperCAmelCase_ : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : Tuple = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCAmelCase_ : Any = 45
UpperCAmelCase_ : Dict = 1581
UpperCAmelCase_ : Union[str, Any] = 1517
UpperCAmelCase_ : Optional[int] = 1570
UpperCAmelCase_ : Any = 1584
UpperCAmelCase_ : int = 1793
UpperCAmelCase_ : List[str] = 1795
UpperCAmelCase_ : str = 1916
UpperCAmelCase_ : Dict = 1864
UpperCAmelCase_ : Optional[Any] = 1905
UpperCAmelCase_ : Any = 1919
UpperCAmelCase_ : int = 2429
UpperCAmelCase_ : List[str] = 2208
UpperCAmelCase_ : Tuple = 2418
UpperCAmelCase_ : Optional[Any] = 2323
UpperCAmelCase_ : str = 2407
# @@protoc_insertion_point(module_scope)
| 176
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict , lowercase_ : str=3 , lowercase_ : Dict=7 , lowercase_ : Any=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Dict=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Optional[Any]=4 , lowercase_ : List[str]=37 , lowercase_ : int="gelu" , lowercase_ : int=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Any=512 , lowercase_ : List[Any]=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=3 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : Dict = use_input_mask
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : str = use_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : Dict = num_choices
SCREAMING_SNAKE_CASE_ : Tuple = scope
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = FalconModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : int , lowercase_ : str , lowercase_ : str , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : Optional[int] = FalconModel(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = FalconForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Any , lowercase_ : int , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = FalconForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1)
SCREAMING_SNAKE_CASE_ : List[str] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
SCREAMING_SNAKE_CASE_ : Any = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
SCREAMING_SNAKE_CASE_ : int = ids_tensor((1,) , output_from_past.shape[-1]).item()
SCREAMING_SNAKE_CASE_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
            SCREAMING_SNAKE_CASE_ ,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (FalconForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = FalconModelTester(self)
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
        SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
SCREAMING_SNAKE_CASE_ : Optional[int] = alibi
self.model_tester.create_and_check_model(lowercase_ , *lowercase_)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)
@slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
@slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
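# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the transformers test file): the
# cache tests above rely on the invariant that decoding with past_key_values
# reproduces a full forward pass. The toy single-head attention below checks
# that invariant in plain PyTorch; every name here (toy_attention,
# _demo_kv_cache_equivalence, d_model) is invented for this example.
import torch


def toy_attention(q, k, v):
    # causal scaled dot-product attention; `offset` aligns the last query
    # with the last key so a 1-token query can attend to a longer cache
    scores = q @ k.transpose(-2, -1) / (q.shape[-1] ** 0.5)
    offset = k.shape[0] - q.shape[0]
    mask = torch.triu(torch.ones(q.shape[0], k.shape[0], dtype=torch.bool), diagonal=offset + 1)
    scores = scores.masked_fill(mask, float("-inf"))
    return scores.softmax(dim=-1) @ v


def _demo_kv_cache_equivalence(seq_len=5, d_model=8):
    torch.manual_seed(0)
    q = torch.randn(seq_len, d_model)
    k = torch.randn(seq_len, d_model)
    v = torch.randn(seq_len, d_model)
    full = toy_attention(q, k, v)  # one pass over the whole sequence
    step_outputs = []
    for t in range(seq_len):
        past_k, past_v = k[: t + 1], v[: t + 1]  # the "cache" after step t
        step_outputs.append(toy_attention(q[t : t + 1], past_k, past_v))
    incremental = torch.cat(step_outputs, dim=0)
    assert torch.allclose(full, incremental, atol=1e-6)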
| 176
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 51
|
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Scan the list from both ends at once and return the index of `key`,
    or -1 if it is absent.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
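# Hedged usage sketch for the two-ended recursive search above; the sample
# values are invented for illustration.
def _demo_search() -> None:
    data = [1, 4, 9, 16, 25, 36]
    assert search(data, 16) == 3  # found while the two scans close in
    assert search(data, 36) == 5  # matched immediately from the right end
    assert search(data, 7) == -1  # absent keys report -1
    # each call advances `left` and retreats `right`, so at most
    # len(data) // 2 recursive calls are made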
| 616
| 0
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
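# Worked example (values invented for illustration): radix_sort makes one
# bucket pass per digit, least significant first, so three passes suffice
# for values below 1000.
def _demo_radix_sort() -> None:
    data = [170, 45, 75, 90, 802, 24, 2, 66]
    # pass 1 (placement=1):   bucket by the ones digit
    # pass 2 (placement=10):  bucket by the tens digit
    # pass 3 (placement=100): bucket by the hundreds digit
    assert radix_sort(data) == [2, 24, 45, 66, 75, 90, 170, 802]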
| 708
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the index of the winning weight vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning vector toward the sample: w[j][i] += alpha * (sample[i] - w[j][i])."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
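# A small numeric check of the update rule used above,
# w[j][i] += alpha * (sample[i] - w[j][i]): with alpha = 0.5 every updated
# weight moves halfway toward the sample. Values are invented for
# illustration.
def _demo_update_rule() -> None:
    som = SelfOrganizingMap()
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    sample = [1, 1, 0, 0]
    winner = som.get_winner(weights, sample)  # the rule above picks index 0 here
    weights = som.update(weights, sample, winner, 0.5)
    assert abs(weights[winner][0] - 0.6) < 1e-9  # 0.2 + 0.5 * (1 - 0.2)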
| 562
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
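def _demo_add_sub_symbol():
    # Editor's toy example (not real tokenizer output): continuation
    # characters of each segmented word get the "##" prefix so they can be
    # masked as whole words.
    bert_tokens = ["我", "喜", "欢", "北", "京"]
    chinese_word_set = {"喜欢", "北京"}
    marked = add_sub_symbol(list(bert_tokens), chinese_word_set)
    assert marked == ["我", "喜", "##欢", "北", "##京"]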
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
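# Example invocation (illustrative; the script file name is assumed and the
# paths are simply the argparse defaults above):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt
#
# The output file holds one JSON list per input line with the positions of
# "##"-continuation tokens, ready for whole-word masking during pretraining.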
| 146
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74
| 0
|
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
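# Quick sanity checks (values chosen for illustration):
def _demo_is_palindrome() -> None:
    assert is_palindrome(121) is True
    assert is_palindrome(-121) is False  # negatives never qualify
    assert is_palindrome(10) is False  # the reversal drops the trailing zero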
| 718
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
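# ---------------------------------------------------------------------------
# Editor's dependency-free sketch of the CER formula documented above,
# CER = (S + D + I) / N, computed from a plain Levenshtein table. It is
# illustrative only and is not the jiwer-backed metric implemented by the
# class; the function name is invented.
def _char_error_rate(reference: str, prediction: str) -> float:
    m, n = len(reference), len(prediction)
    # dp[i][j] = minimum edits turning reference[:i] into prediction[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + cost,  # substitution or hit
            )
    return dp[m][n] / m  # (S + D + I) divided by reference length N


# e.g. _char_error_rate("abcd", "abxd") == 0.25 — one substitution in four
# reference characters.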
| 60
| 0
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 106
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # two implicants merge when they differ in exactly one bit;
                # mark both as combined and carry the merged pattern forward
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
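# Worked example (non-interactive; inputs invented for illustration):
# minimising a 3-variable function with minterms {0, 1, 2, 5}. Integer
# minterms are used here so the binary strings stay clean.
def _demo_quine_mccluskey() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])
    assert binary == ["000", "001", "010", "101"]
    prime_implicants = check(binary)  # 00_, 0_0 and _01, in some order
    assert sorted(prime_implicants) == ["00_", "0_0", "_01"]
    chart = prime_implicant_chart(prime_implicants, binary)
    essential = selection(chart, prime_implicants)
    assert set(essential) == {"0_0", "_01"}  # each covers a lone minterm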
| 130
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
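# ---------------------------------------------------------------------------
# Editor's standalone sketch of the aspect-ratio-preserving "shortest edge"
# arithmetic that get_expected_values() mirrors above; the helper name and
# the sample numbers are invented for illustration.
def _shortest_edge_resize(height: int, width: int, shortest_edge: int = 18):
    # scale so the smaller side becomes `shortest_edge`, keeping the ratio
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # already square


# e.g. a 400x300 (h x w) image maps to (24, 18): the 300px side shrinks to
# 18 and the 400px side scales by the same 18/300 factor.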
| 714
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(lowerCamelCase_ , '''Please write your own downloading logic.''' )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if not gfile.Exists(lowerCamelCase_ ):
gfile.MakeDirs(lowerCamelCase_ )
A : List[str] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if not gfile.Exists(lowerCamelCase_ ):
urllib.request.urlretrieve(lowerCamelCase_ , lowerCamelCase_ ) # noqa: S310
with gfile.GFile(lowerCamelCase_ ) as f:
A : int = f.size()
print('''Successfully downloaded''' , lowerCamelCase_ , lowerCamelCase_ , '''bytes.''' )
return filepath
@deprecated(
lowerCamelCase_ , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def snake_case__ ( lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=dtypes.floataa , lowerCamelCase_=True , lowerCamelCase_=5000 , lowerCamelCase_=None , lowerCamelCase_=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ )
A : List[str] = fake()
A : int = fake()
A : Optional[int] = fake()
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
if not source_url: # empty string check
A : str = DEFAULT_SOURCE_URL
A : List[Any] = '''train-images-idx3-ubyte.gz'''
A : Dict = '''train-labels-idx1-ubyte.gz'''
A : List[Any] = '''t10k-images-idx3-ubyte.gz'''
A : Optional[Any] = '''t10k-labels-idx1-ubyte.gz'''
A : Tuple = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file )
with gfile.Open(lowerCamelCase_ , '''rb''' ) as f:
A : Tuple = _extract_images(lowerCamelCase_ )
A : List[str] = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file )
with gfile.Open(lowerCamelCase_ , '''rb''' ) as f:
A : Tuple = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
A : Any = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file )
with gfile.Open(lowerCamelCase_ , '''rb''' ) as f:
A : Union[str, Any] = _extract_images(lowerCamelCase_ )
A : List[Any] = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file )
with gfile.Open(lowerCamelCase_ , '''rb''' ) as f:
A : str = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
if not 0 <= validation_size <= len(lowerCamelCase_ ):
A : Union[str, Any] = (
'''Validation size should be between 0 and '''
F'{len(lowerCamelCase_ )}. Received: {validation_size}.'
)
raise ValueError(lowerCamelCase_ )
A : Tuple = train_images[:validation_size]
A : str = train_labels[:validation_size]
A : int = train_images[validation_size:]
A : Optional[Any] = train_labels[validation_size:]
A : List[Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
A : Optional[int] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
A : Any = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
A : Union[str, Any] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __magic_name__ :
def __init__( self , __magic_name__ , __magic_name__=sys.maxsize ):
"""simple docstring"""
_lowerCAmelCase = 'bilinear'
_lowerCAmelCase = max_size
_lowerCAmelCase = short_edge_length
def __call__( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = []
for img in imgs:
_lowerCAmelCase , _lowerCAmelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
_lowerCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_lowerCAmelCase = size * 1.0 / min(__magic_name__ , __magic_name__ )
if h < w:
_lowerCAmelCase , _lowerCAmelCase = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase = scale * h, size
if max(__magic_name__ , __magic_name__ ) > self.max_size:
_lowerCAmelCase = self.max_size * 1.0 / max(__magic_name__ , __magic_name__ )
_lowerCAmelCase = newh * scale
_lowerCAmelCase = neww * scale
_lowerCAmelCase = int(neww + 0.5 )
_lowerCAmelCase = int(newh + 0.5 )
if img.dtype == np.uinta:
_lowerCAmelCase = Image.fromarray(__magic_name__ )
_lowerCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_lowerCAmelCase = np.asarray(__magic_name__ )
else:
_lowerCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # hwc -> nchw
_lowerCAmelCase = nn.functional.interpolate(
__magic_name__ , (newh, neww) , mode=self.interp_method , align_corners=__magic_name__ ).squeeze(0 )
img_augs.append(__magic_name__ )
return img_augs
class __magic_name__ :
def __init__( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_lowerCAmelCase = cfg.INPUT.FORMAT
_lowerCAmelCase = cfg.SIZE_DIVISIBILITY
_lowerCAmelCase = cfg.PAD_VALUE
_lowerCAmelCase = cfg.INPUT.MAX_SIZE_TEST
_lowerCAmelCase = cfg.MODEL.DEVICE
_lowerCAmelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowerCAmelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_lowerCAmelCase = lambda __magic_name__ : (x - self.pixel_mean) / self.pixel_std
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase = tuple(max(__magic_name__ ) for s in zip(*[img.shape for img in images] ) )
_lowerCAmelCase = [im.shape[-2:] for im in images]
_lowerCAmelCase = [
nn.functional.pad(
__magic_name__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(__magic_name__ , __magic_name__ )
]
return torch.stack(__magic_name__ ), torch.tensor(__magic_name__ )
def __call__( self , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
with torch.no_grad():
if not isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase = [images]
if single_image:
assert len(__magic_name__ ) == 1
for i in range(len(__magic_name__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(__magic_name__ , images.pop(__magic_name__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
__magic_name__ , torch.as_tensor(img_tensorize(images.pop(__magic_name__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_lowerCAmelCase = torch.tensor([im.shape[:2] for im in images] )
_lowerCAmelCase = self.aug(__magic_name__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_lowerCAmelCase = [self.normalizer(__magic_name__ ) for x in images]
# now pad them to do the following operations
_lowerCAmelCase , _lowerCAmelCase = self.pad(__magic_name__ )
# pad to a multiple of size_divisibility (not implemented here)
if self.size_divisibility > 0:
raise NotImplementedError()
# compute per-image (y, x) rescale factors relative to the raw sizes
_lowerCAmelCase = torch.true_divide(__magic_name__ , __magic_name__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
assert torch.isfinite(__lowerCamelCase ).all(), "Box tensor contains infinite or NaN!"
_lowerCAmelCase , _lowerCAmelCase = box_size
tensor[:, 0].clamp_(min=0, max=__lowerCamelCase )
tensor[:, 1].clamp_(min=0, max=__lowerCamelCase )
tensor[:, 2].clamp_(min=0, max=__lowerCamelCase )
tensor[:, 3].clamp_(min=0, max=__lowerCamelCase )
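# --- illustrative sketch (not part of the original module) ---
# The two helpers above rescale xyxy boxes by per-image (y, x) factors and
# clamp them to the image bounds; a self-contained demo with hypothetical values:
import torch
demo_boxes = torch.tensor([[10.0, 20.0, 500.0, 220.0]])
demo_scale_yx = torch.tensor([[0.5, 0.25]])  # (scale_y, scale_x)
demo_boxes[:, 0::2] *= demo_scale_yx[:, 1]   # scale x1, x2
demo_boxes[:, 1::2] *= demo_scale_yx[:, 0]   # scale y1, y2
demo_h, demo_w = 100, 120
demo_boxes[:, 0].clamp_(min=0, max=demo_w)
demo_boxes[:, 1].clamp_(min=0, max=demo_h)
demo_boxes[:, 2].clamp_(min=0, max=demo_w)
demo_boxes[:, 3].clamp_(min=0, max=demo_h)
# demo_boxes -> [[2.5, 10.0, 120.0, 100.0]]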
"""simple docstring"""
def A__ ( __lowerCamelCase ):
"""simple docstring"""
if not head:
return True
# split the list to two parts
_lowerCAmelCase , _lowerCAmelCase = head.next, head
while fast and fast.next:
_lowerCAmelCase = fast.next.next
_lowerCAmelCase = slow.next
_lowerCAmelCase = slow.next
_lowerCAmelCase = None # Don't forget here! But forget still works!
# reverse the second part
_lowerCAmelCase = None
while second:
_lowerCAmelCase = second.next
_lowerCAmelCase = node
_lowerCAmelCase = second
_lowerCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_lowerCAmelCase = node.next
_lowerCAmelCase = head.next
return True
def A__ ( __lowerCamelCase ):
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_lowerCAmelCase = _lowerCAmelCase = _lowerCAmelCase = head
while fast and fast.next:
_lowerCAmelCase , _lowerCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
_lowerCAmelCase = [slow.val]
while slow.next:
_lowerCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_lowerCAmelCase = cur.next
return True
def A__ ( __lowerCamelCase ):
"""simple docstring"""
if not head or not head.next:
return True
_lowerCAmelCase = {}
_lowerCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(__lowerCamelCase )
else:
_lowerCAmelCase = [pos]
_lowerCAmelCase = head.next
pos += 1
_lowerCAmelCase = pos - 1
_lowerCAmelCase = 0
for v in d.values():
if len(__lowerCamelCase ) % 2 != 0:
middle += 1
else:
_lowerCAmelCase = 0
for i in range(0, len(__lowerCamelCase ) ):
if v[i] + v[len(__lowerCamelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
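# --- illustrative sketch (not part of the original snippet) ---
# All three checks above assume a singly linked node with `val`/`next`
# attributes that the snippet never defines; a minimal hypothetical node
# class plus a self-contained smoke test of the stack-based idea:
class DemoListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def demo_build(values):
    head = None
    for v in reversed(values):
        head = DemoListNode(v, head)
    return head
def demo_is_palindrome(head):
    vals = []
    while head:  # collect values, then compare against the reversal
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]
assert demo_is_palindrome(demo_build([1, 2, 2, 1]))
assert not demo_is_palindrome(demo_build([1, 2, 3]))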
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase__ :
def __init__( self , a = None ) -> None:
'''simple docstring'''
if components is None:
_UpperCamelCase = []
_UpperCamelCase = list(a )
def __len__( self ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self ) -> str:
'''simple docstring'''
return "(" + ",".join(map(a , self.__components ) ) + ")"
def __add__( self , a ) -> Vector:
'''simple docstring'''
_UpperCamelCase = len(self )
if size == len(a ):
_UpperCamelCase = [self.__components[i] + other.component(a ) for i in range(a )]
return Vector(a )
else:
raise Exception("""must have the same size""" )
def __sub__( self , a ) -> Vector:
'''simple docstring'''
_UpperCamelCase = len(self )
if size == len(a ):
_UpperCamelCase = [self.__components[i] - other.component(a ) for i in range(a )]
return Vector(a )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self , a ) -> Vector:
'''simple docstring'''
...
@overload
def __mul__( self , a ) -> float:
'''simple docstring'''
...
def __mul__( self , a ) -> float | Vector:
'''simple docstring'''
if isinstance(a , (float, int) ):
_UpperCamelCase = [c * other for c in self.__components]
return Vector(a )
elif isinstance(a , a ) and len(self ) == len(a ):
_UpperCamelCase = len(self )
_UpperCamelCase = [self.__components[i] * other.component(a ) for i in range(a )]
return sum(a )
else: # error case
raise Exception("""invalid operand!""" )
def A_ ( self ) -> Vector:
'''simple docstring'''
return Vector(self.__components )
def A_ ( self , a ) -> float:
'''simple docstring'''
if isinstance(a , a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def A_ ( self , a , a ) -> None:
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
_UpperCamelCase = value
def A_ ( self ) -> float:
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
_UpperCamelCase = [c**2 for c in self.__components]
return math.sqrt(sum(a ) )
def A_ ( self , a , a = False ) -> float:
'''simple docstring'''
_UpperCamelCase = self * other
_UpperCamelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __A(lowerCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
return Vector([0] * dimension )
def __A(lowerCAmelCase , lowerCAmelCase ) -> Vector:
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (isinstance(lowerCAmelCase , lowerCAmelCase ))
_UpperCamelCase = [0] * dimension
_UpperCamelCase = 1
return Vector(lowerCAmelCase )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Vector:
"""simple docstring"""
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and isinstance(lowerCAmelCase , lowerCAmelCase )
and (isinstance(lowerCAmelCase , (int, float) ))
)
return x * scalar + y
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Vector:
"""simple docstring"""
random.seed(lowerCAmelCase )
_UpperCamelCase = [random.randint(lowerCAmelCase , lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class lowerCAmelCase__ :
def __init__( self , a , a , a ) -> None:
'''simple docstring'''
_UpperCamelCase = matrix
_UpperCamelCase = w
_UpperCamelCase = h
def __str__( self ) -> str:
'''simple docstring'''
_UpperCamelCase = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , a ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase = []
for i in range(self.__height ):
_UpperCamelCase = [
self.__matrix[i][j] + other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , a ) -> Matrix:
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_UpperCamelCase = []
for i in range(self.__height ):
_UpperCamelCase = [
self.__matrix[i][j] - other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self , a ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self , a ) -> Vector:
'''simple docstring'''
...
def __mul__( self , a ) -> Vector | Matrix:
'''simple docstring'''
if isinstance(a , a ): # matrix-vector
if len(a ) == self.__width:
_UpperCamelCase = zero_vector(self.__height )
for i in range(self.__height ):
_UpperCamelCase = [
self.__matrix[i][j] * other.component(a )
for j in range(self.__width )
]
ans.change_component(a , sum(a ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(a , (int, float) ): # matrix-scalar
_UpperCamelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(a , self.__width , self.__height )
return None
def A_ ( self ) -> int:
'''simple docstring'''
return self.__height
def A_ ( self ) -> int:
'''simple docstring'''
return self.__width
def A_ ( self , a , a ) -> float:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def A_ ( self , a , a , a ) -> None:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
_UpperCamelCase = value
else:
raise Exception("""change_component: indices out of bounds""" )
def A_ ( self , a , a ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
_UpperCamelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(a ) ):
_UpperCamelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(a , self.__width - 1 , self.__height - 1 ).determinant()
def A_ ( self , a , a ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(a , a )
else:
raise Exception("""Indices out of bounds""" )
def A_ ( self ) -> float:
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_UpperCamelCase = [
self.__matrix[0][y] * self.cofactor(0 , a ) for y in range(self.__width )
]
return sum(a )
def __A(lowerCAmelCase ) -> Matrix:
"""simple docstring"""
_UpperCamelCase = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Matrix:
"""simple docstring"""
random.seed(lowerCAmelCase )
_UpperCamelCase = [
[random.randint(lowerCAmelCase , lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
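# --- illustrative sketch (not part of the original module) ---
# The axpy helper above computes x * scalar + y componentwise; a tiny
# self-contained version over plain lists (hypothetical name `demo_axpy`):
def demo_axpy(scalar, x, y):
    return [scalar * xi + yi for xi, yi in zip(x, y)]
assert demo_axpy(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]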
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __A(lowerCAmelCase ) -> List[str]:
"""simple docstring"""
if "model" in orig_key:
_UpperCamelCase = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
_UpperCamelCase = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
_UpperCamelCase = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
_UpperCamelCase = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
_UpperCamelCase = orig_key.split(""".""" )[0].split("""_""" )[-1]
_UpperCamelCase = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
_UpperCamelCase = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
_UpperCamelCase = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
_UpperCamelCase = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
_UpperCamelCase = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
_UpperCamelCase = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
_UpperCamelCase = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
_UpperCamelCase = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
_UpperCamelCase = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
_UpperCamelCase = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
_UpperCamelCase = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
_UpperCamelCase = """yoso.""" + orig_key
return orig_key
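# --- illustrative sketch (not part of the original script) ---
# A self-contained trace of the chained renames above for one hypothetical key:
demo_key = "model.transformer_0.mha.W_q.weight"
for demo_old, demo_new in [
    ("model.", ""),
    ("transformer_0", "encoder.layer.0"),
    ("mha", "attention"),
    ("W_q", "self.query"),
]:
    demo_key = demo_key.replace(demo_old, demo_new)
demo_key = "yoso." + demo_key  # keys without "cls" get the model prefix
assert demo_key == "yoso.encoder.layer.0.attention.self.query.weight"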
def __A(lowerCAmelCase , lowerCAmelCase ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase = orig_state_dict.pop(lowerCAmelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_UpperCamelCase = val
_UpperCamelCase = orig_state_dict["""cls.predictions.decoder.bias"""]
_UpperCamelCase = torch.arange(lowerCAmelCase ).expand((1, -1) ) + 2
return orig_state_dict
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = torch.load(lowerCAmelCase , map_location="""cpu""" )["""model_state_dict"""]
_UpperCamelCase = YosoConfig.from_json_file(lowerCAmelCase )
_UpperCamelCase = YosoForMaskedLM(lowerCAmelCase )
_UpperCamelCase = convert_checkpoint_helper(config.max_position_embeddings , lowerCAmelCase )
print(model.load_state_dict(lowerCAmelCase ) )
model.eval()
model.save_pretrained(lowerCAmelCase )
print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[int] = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Dict = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
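# --- illustrative sketch (not part of the original module) ---
# A toy, self-contained demonstration of the batch-halving retry pattern
# implemented above, simulating out-of-memory with a plain RuntimeError
# instead of CUDA (all names hypothetical):
import functools
def demo_halving(start):
    def deco(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            bs = start
            while bs > 0:
                try:
                    return fn(bs, *args, **kwargs)
                except RuntimeError:
                    bs //= 2  # halve and retry, as in the decorator above
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return deco
@demo_halving(start=128)
def demo_train_step(batch_size):
    if batch_size > 32:  # pretend anything above 32 runs out of memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size
assert demo_train_step() == 32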
'''simple docstring'''
import math
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = 0
while num > 0:
UpperCAmelCase : List[Any] = num % 8
UpperCAmelCase : Dict = octal + (remainder * math.floor(math.pow(10 , counter ) ))
counter += 1
UpperCAmelCase : Optional[int] = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"0o{int(__magic_name__ )}"
def lowercase ( ):
'''simple docstring'''
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase ( __magic_name__ ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__magic_name__ ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
UpperCAmelCase : Any = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
UpperCAmelCase : Tuple = PipelineDataFormat.from_str(
format=__magic_name__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__magic_name__ , __magic_name__ )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Dict = nlp
UpperCAmelCase : str = reader
@staticmethod
def A_ ( snake_case ):
'''simple docstring'''
UpperCAmelCase : str = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=snake_case , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=snake_case , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=snake_case , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=snake_case , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=snake_case , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=snake_case , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=snake_case , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=snake_case , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._nlp, []
for entry in self._reader:
UpperCAmelCase : Dict = nlp(**snake_case ) if self._reader.is_multi_columns else nlp(snake_case )
if isinstance(snake_case , snake_case ):
outputs.append(snake_case )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
UpperCAmelCase : str = self._reader.save_binary(snake_case )
logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(snake_case )
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> float:
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , ) -> float:
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
__snake_case , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )
if __name__ == "__main__":
import doctest
doctest.testmod()
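# --- illustrative arithmetic check (not part of the original module) ---
# simple interest = principal * daily_interest_rate * days_between_payments,
# e.g. 1000 * 0.0005 * 30 = 15.0
# compound interest = principal * ((1 + rate) ** periods - 1),
# e.g. 1000 * (1.05 ** 3 - 1) = 157.625
assert abs(1000 * 0.0005 * 30 - 15.0) < 1e-9
assert abs(1000 * (1.05**3 - 1) - 157.625) < 1e-9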
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = None
def _snake_case ( lowercase__ , lowercase__=0.9_9_9 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_lowerCamelCase : Optional[Any] = []
for i in range(lowercase__ ):
_lowerCamelCase : Optional[Any] = i / num_diffusion_timesteps
_lowerCamelCase : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
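# --- illustrative sketch (not part of the original module) ---
# Quick self-contained check of the cosine alpha-bar schedule built above:
# betas stay in (0, 0.999] and the final step hits the 0.999 cap (demo_* names
# are hypothetical):
import math
demo_alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
demo_betas = [
    min(1 - demo_alpha_bar((i + 1) / 10) / demo_alpha_bar(i / 10), 0.999)
    for i in range(10)
]
assert all(0 < b <= 0.999 for b in demo_betas)
assert demo_betas[-1] == 0.999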
class lowerCAmelCase__ ( lowercase, lowercase ):
'''simple docstring'''
lowerCamelCase__ = 1
@register_to_config
def __init__( self , lowercase = 1000 , lowercase = 0.00_01 , lowercase = 0.02 , lowercase = "linear" , lowercase = None , lowercase = True , lowercase = True , lowercase = 0 , lowercase = "epsilon" , lowercase = 1.0 , **lowercase , ):
if kwargs.get('set_alpha_to_one' , lowercase ) is not None:
_lowerCamelCase : List[str] = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , lowercase , standard_warn=lowercase )
_lowerCamelCase : Union[str, Any] = kwargs['set_alpha_to_one']
if trained_betas is not None:
_lowerCamelCase : Tuple = torch.tensor(lowercase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCamelCase : List[str] = torch.linspace(lowercase , lowercase , lowercase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCamelCase : Any = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCamelCase : Tuple = betas_for_alpha_bar(lowercase )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_lowerCamelCase : str = 1.0 - self.betas
_lowerCamelCase : str = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCamelCase : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCamelCase : Optional[Any] = 1.0
# setable values
_lowerCamelCase : Dict = None
_lowerCamelCase : int = torch.from_numpy(np.arange(0 , lowercase ).copy().astype(np.intaa ) )
def A_ ( self , lowercase , lowercase = None ):
return sample
def A_ ( self , lowercase , lowercase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_lowerCamelCase : int = num_inference_steps
_lowerCamelCase : Optional[int] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCamelCase : Any = (np.arange(0 , lowercase ) * step_ratio).round().copy().astype(np.intaa )
_lowerCamelCase : Any = torch.from_numpy(lowercase ).to(lowercase )
self.timesteps += self.config.steps_offset
def A_ ( self , lowercase , lowercase , lowercase , lowercase = 0.0 , lowercase = False , lowercase = None , lowercase = True , ):
# 1. get previous step value (=t+1)
_lowerCamelCase : int = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCamelCase : Dict = self.alphas_cumprod[timestep]
_lowerCamelCase : str = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCamelCase : Any = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCamelCase : Any = model_output
elif self.config.prediction_type == "sample":
_lowerCamelCase : Optional[Any] = model_output
_lowerCamelCase : Tuple = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCamelCase : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCamelCase : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Tuple = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCamelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCamelCase : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase )
def __len__( self ):
return self.config.num_train_timesteps
'''simple docstring'''
import unittest
from transformers import DonutProcessor
lowercase_ = "naver-clova-ix/donut-base"
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = DonutProcessor.from_pretrained(A )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
_a = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
_a = self.processor.tokenajson(A )
self.assertDictEqual(A , A )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = 'pegasus'
__lowerCamelCase : Tuple = ['past_key_values']
__lowerCamelCase : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , A=50_265 , A=1_024 , A=12 , A=4_096 , A=16 , A=12 , A=4_096 , A=16 , A=0.0 , A=0.0 , A=True , A=True , A="gelu" , A=1_024 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0 , A=False , A=0 , A=1 , A=1 , **A , ) -> str:
"""simple docstring"""
_a = vocab_size
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = use_cache
_a = encoder_layers
_a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , )
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.d_model
def _UpperCamelCase ( snake_case__ ) -> bool:
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def _UpperCamelCase ( snake_case__ ) -> bool:
__UpperCAmelCase : Any = credit_card_number
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = len(snake_case__ ) - 2
for i in range(snake_case__, -1, -2 ):
# double the value of every second digit
__UpperCAmelCase : Optional[Any] = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
__UpperCAmelCase : Dict = cc_number[:i] + str(snake_case__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(snake_case__ ) - 1, -1, -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def _UpperCamelCase ( snake_case__ ) -> bool:
__UpperCAmelCase : List[str] = f'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(f'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(snake_case__ ) <= 16:
print(f'''{error_message} of its length.''' )
return False
if not validate_initial_digits(snake_case__ ):
print(f'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(snake_case__ ):
print(f'''{error_message} it fails the Luhn check.''' )
return False
print(f'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
def _A ( ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
while len(constant ) < 1e6:
constant.append(str(i ) )
i += 1
__SCREAMING_SNAKE_CASE = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Any = logging.get_logger(__name__)
_snake_case : Optional[Any] = {'vocab_file': 'vocab.json'}
_snake_case : Any = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case : Optional[int] = {'mgp-str': 27}
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, _a, _a="[GO]", _a="[GO]", _a="[s]", _a="[GO]", **_a ) -> Dict:
super().__init__(
unk_token=_a, bos_token=_a, eos_token=_a, pad_token=_a, **_a, )
with open(_a, encoding="utf-8" ) as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(_a )
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
@property
def __lowerCAmelCase ( self ) -> List[str]:
return len(self.vocab )
def __lowerCAmelCase ( self ) -> Tuple:
return dict(self.vocab, **self.added_tokens_encoder )
def __lowerCAmelCase ( self, _a ) -> int:
__SCREAMING_SNAKE_CASE = []
for s in text:
char_tokens.extend(_a )
return char_tokens
def __lowerCAmelCase ( self, _a ) -> List[Any]:
return self.vocab.get(_a, self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self, _a ) -> Tuple:
return self.decoder.get(_a )
def __lowerCAmelCase ( self, _a, _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error("Vocabulary path ({}) should be a directory".format(_a ) )
return
__SCREAMING_SNAKE_CASE = os.path.join(
_a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(_a, "w", encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=_a, ensure_ascii=_a ) + "\n" )
return (vocab_file,)
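# --- illustrative sketch (not part of the original module) ---
# The tokenizer above splits text into single characters and maps each one
# through a vocab dict, falling back to the unk token; the core idea,
# self-contained with a toy vocab (demo_* names are hypothetical):
demo_vocab = {"[GO]": 0, "a": 1, "b": 2}
demo_text = "ab?"
demo_ids = [demo_vocab.get(ch, demo_vocab["[GO]"]) for ch in demo_text]
assert demo_ids == [1, 2, 0]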
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A_ : Tuple = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ['''DeiTFeatureExtractor''']
A_ : Union[str, Any] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = FlaxViTModel(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase = (self.patch_size, self.patch_size)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FlaxViTForImageClassification(config=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FlaxViTForImageClassification(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : List[str] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = FlaxViTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
@jax.jit
def model_jitted(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return model(pixel_values=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with self.subTest('JIT Enabled' ):
_UpperCAmelCase = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_UpperCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
"""simple docstring"""
def snake_case (A_ :Dict ):
'''simple docstring'''
a : Dict = len(SCREAMING_SNAKE_CASE_ )
a : str = sum(SCREAMING_SNAKE_CASE_ )
a : str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
a : List[Any] = True
for i in range(1 , s + 1 ):
a : Optional[Any] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
a : List[str] = dp[i - 1][j]
if arr[i - 1] <= j:
a : List[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
a : str = s - 2 * j
break
return diff
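# --- illustrative sketch (not part of the original snippet) ---
# Equivalent set-based check of the minimum subset-sum-difference DP above
# (hypothetical names; same answer, different bookkeeping):
def demo_min_partition_diff(arr):
    total = sum(arr)
    reachable = {0}  # all subset sums reachable so far
    for a in arr:
        reachable |= {r + a for r in reachable}
    return min(abs(total - 2 * j) for j in reachable if j <= total // 2)
assert demo_min_partition_diff([1, 6, 11, 5]) == 1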
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase : Optional[Any] = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : Any = "▁"
_UpperCamelCase : Any = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_UpperCamelCase : str = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_UpperCamelCase : Optional[int] = {
"facebook/m2m100_418M": 10_24,
}
# fmt: off
_UpperCamelCase : str = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class UpperCAmelCase_ ( PreTrainedTokenizer ):
lowerCamelCase__ : int = VOCAB_FILES_NAMES
lowerCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[int] = ["input_ids", "attention_mask"]
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = []
def __init__( self , a , a , a=None , a=None , a="<s>" , a="</s>" , a="</s>" , a="<pad>" , a="<unk>" , a="m2m100" , a = None , a=8 , **a , ) -> None:
lowercase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ : List[Any] = language_codes
lowercase__ : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowercase__ : str = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
lowercase__ : Optional[Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(a )
for lang_code in fairseq_language_code
if self.get_lang_token(a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a , tgt_lang=a , bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , language_codes=a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=a , **a , )
lowercase__ : Union[str, Any] = vocab_file
lowercase__ : str = load_json(a )
lowercase__ : List[Any] = {v: k for k, v in self.encoder.items()}
lowercase__ : Optional[int] = spm_file
lowercase__ : Union[str, Any] = load_spm(a , self.sp_model_kwargs )
lowercase__ : Optional[int] = len(self.encoder )
lowercase__ : Dict = {
self.get_lang_token(a ): self.encoder_size + i for i, lang_code in enumerate(a )
}
lowercase__ : List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(a )}
lowercase__ : str = {v: k for k, v in self.lang_token_to_id.items()}
lowercase__ : List[str] = src_lang if src_lang is not None else """en"""
lowercase__ : List[str] = tgt_lang
lowercase__ : List[str] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
lowercase__ : Dict = num_madeup_words
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _UpperCAmelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self , a ) -> None:
lowercase__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self , a ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _UpperCAmelCase ( self , a ) -> List[str]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(a , self.encoder[self.unk_token] )
def _UpperCAmelCase ( self , a ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(a , self.unk_token )
def _UpperCAmelCase ( self , a ) -> Dict:
lowercase__ : Tuple = []
lowercase__ : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a ) + token
lowercase__ : Tuple = []
else:
current_sub_tokens.append(a )
out_string += self.sp_model.decode(a )
return out_string.strip()
def _UpperCAmelCase ( self , a , a = None , a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
lowercase__ : Union[str, Any] = [1] * len(self.prefix_tokens )
lowercase__ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _UpperCAmelCase ( self , a , a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Dict = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
lowercase__ : str = self.__dict__.copy()
lowercase__ : List[Any] = None
return state
def __setstate__( self , a ) -> None:
lowercase__ : Any = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__ : int = {}
lowercase__ : Any = load_spm(self.spm_file , self.sp_model_kwargs )
def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]:
lowercase__ : Optional[int] = Path(a )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
lowercase__ : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
lowercase__ : Optional[Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , a )
if os.path.abspath(self.spm_file ) != os.path.abspath(a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , a )
elif not os.path.isfile(self.spm_file ):
with open(a , 'wb' ) as fi:
lowercase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (str(a ), str(a ))
def _UpperCAmelCase ( self , a , a = "en" , a = None , a = "ro" , **a , ) -> BatchEncoding:
lowercase__ : List[Any] = src_lang
lowercase__ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(a , a , **a )
def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase__ : List[str] = src_lang
lowercase__ : str = self(a , add_special_tokens=a , **a )
lowercase__ : List[Any] = self.get_lang_id(a )
lowercase__ : str = tgt_lang_id
return inputs
def _UpperCAmelCase ( self ) -> Any:
self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ) -> Tuple:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self , a ) -> None:
lowercase__ : List[Any] = self.get_lang_token(a )
lowercase__ : List[str] = self.lang_token_to_id[lang_token]
lowercase__ : str = [self.cur_lang_id]
lowercase__ : int = [self.eos_token_id]
def _UpperCAmelCase ( self , a ) -> None:
lowercase__ : int = self.get_lang_token(a )
lowercase__ : Optional[Any] = self.lang_token_to_id[lang_token]
lowercase__ : List[Any] = [self.cur_lang_id]
lowercase__ : Tuple = [self.eos_token_id]
def _UpperCAmelCase ( self , a ) -> str:
return self.lang_code_to_token[lang]
def _UpperCAmelCase ( self , a ) -> int:
lowercase__ : int = self.get_lang_token(a )
return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    '''simple docstring'''
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path : str ):
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
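# Round-trip sanity check (added; not in the original file) for the JSON
# helpers above; it writes a small scratch file to the working directory.
if __name__ == "__main__":
    save_json({"hello": "world"} , "vocab_demo.json" )
    print(load_json("vocab_demo.json" ))  # {'hello': 'world'}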
| 599
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# distinct names restored below: the extract gave all four helpers the same
# name, so each definition shadowed the previous one
def freeze_params( module ):
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    """simple docstring"""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image( image ):
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
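if __name__ == "__main__":
    # Smoke test (added; not in the original file): pick a device, stamp the run.
    print(f"running on {get_device()} at {get_timestamp()}" )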
| 88
| 0
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_a : int ,_a : str ,_a : List[Any] = 3 ,_a : Optional[Any] = 1 ,_a : List[Any] = "relu" ):
'''simple docstring'''
super().__init__()
A_ : Optional[Any] = nn.Convad(
_a ,_a ,kernel_size=_a ,stride=_a ,padding=kernel_size // 2 ,bias=_a )
A_ : List[Any] = nn.BatchNormad(_a )
A_ : int = ACTaFN[activation] if activation is not None else nn.Identity()
def _a ( self : List[Any] ,_a : List[str] ):
'''simple docstring'''
A_ : Optional[int] = self.convolution(_a )
A_ : List[Any] = self.normalization(_a )
A_ : Optional[int] = self.activation(_a )
return hidden_state
class ResNetEmbeddings( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : Tuple ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
A_ : int = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 )
A_ : Dict = config.num_channels
def _a ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
A_ : Any = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
A_ : Any = self.embedder(_a )
A_ : str = self.pooler(_a )
return embedding
class ResNetShortCut( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : List[Any] ,_a : Any ,_a : Optional[int] = 2 ):
'''simple docstring'''
super().__init__()
A_ : List[Any] = nn.Convad(_a ,_a ,kernel_size=1 ,stride=_a ,bias=_a )
A_ : Optional[int] = nn.BatchNormad(_a )
def _a ( self : List[str] ,_a : int ):
'''simple docstring'''
A_ : Any = self.convolution(_a )
A_ : Union[str, Any] = self.normalization(_a )
return hidden_state
class ResNetBasicLayer( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : List[str] ,_a : Any ,_a : Dict = 1 ,_a : Optional[Any] = "relu" ):
'''simple docstring'''
super().__init__()
A_ : Dict = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
A_ : Tuple = nn.Sequential(
ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,activation=_a ) ,)
A_ : int = ACTaFN[activation]
def _a ( self : List[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = hidden_state
A_ : Optional[int] = self.layer(_a )
A_ : Any = self.shortcut(_a )
hidden_state += residual
A_ : Tuple = self.activation(_a )
return hidden_state
class ResNetBottleNeckLayer( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : List[str] ,_a : int ,_a : Any = 1 ,_a : Union[str, Any] = "relu" ,_a : str = 4 ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = in_channels != out_channels or stride != 1
A_ : List[Any] = out_channels // reduction
A_ : Dict = (
ResNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
A_ : Dict = nn.Sequential(
ResNetConvLayer(_a ,_a ,kernel_size=1 ) ,ResNetConvLayer(_a ,_a ,stride=_a ) ,ResNetConvLayer(_a ,_a ,kernel_size=1 ,activation=_a ) ,)
A_ : Union[str, Any] = ACTaFN[activation]
def _a ( self : List[Any] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = hidden_state
A_ : List[str] = self.layer(_a )
A_ : Any = self.shortcut(_a )
hidden_state += residual
A_ : int = self.activation(_a )
return hidden_state
class ResNetStage( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_a : str ,_a : str ,_a : int ,_a : Optional[Any] = 2 ,_a : List[Any] = 2 ,):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
A_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_a ,_a ,stride=_a ,activation=config.hidden_act ) ,*[layer(_a ,_a ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def _a ( self : Optional[int] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = input
for layer in self.layers:
A_ : str = layer(_a )
return hidden_state
class ResNetEncoder( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : str ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_a ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
A_ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a ,config.depths[1:] ):
self.stages.append(ResNetStage(_a ,_a ,_a ,depth=_a ) )
def _a ( self : Tuple ,_a : Optional[Any] ,_a : Optional[Any] = False ,_a : Tuple = True ):
'''simple docstring'''
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : Tuple = stage_module(_a )
if output_hidden_states:
A_ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_a ,hidden_states=_a ,)
class ResNetPreTrainedModel( PreTrainedModel ):
'''simple docstring'''
a_ = ResNetConfig
a_ = """resnet"""
a_ = """pixel_values"""
a_ = True
def _a ( self : List[Any] ,_a : int ):
'''simple docstring'''
if isinstance(_a ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" )
elif isinstance(_a ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def _a ( self : str ,_a : List[Any] ,_a : Tuple=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : Tuple = value
RESNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class ResNetModel( ResNetPreTrainedModel ):
'''simple docstring'''
def __init__( self : List[str] ,_a : Dict ):
'''simple docstring'''
super().__init__(_a )
A_ : str = config
A_ : Optional[int] = ResNetEmbeddings(_a )
A_ : Union[str, Any] = ResNetEncoder(_a )
A_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _a ( self : str ,_a : str ,_a : str = None ,_a : Optional[int] = None ):
'''simple docstring'''
A_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[Any] = self.embedder(_a )
A_ : List[Any] = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a )
A_ : str = encoder_outputs[0]
A_ : Optional[Any] = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a ,pooler_output=_a ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , __SCREAMING_SNAKE_CASE , )
class ResNetForImageClassification( ResNetPreTrainedModel ):
'''simple docstring'''
def __init__( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(_a )
A_ : Dict = config.num_labels
A_ : List[Any] = ResNetModel(_a )
# classification head
A_ : Optional[Any] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _a ( self : Union[str, Any] ,_a : Union[str, Any] = None ,_a : Optional[int] = None ,_a : List[str] = None ,_a : str = None ,):
'''simple docstring'''
A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
A_ : List[str] = self.resnet(_a ,output_hidden_states=_a ,return_dict=_a )
A_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
A_ : str = self.classifier(_a )
A_ : int = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : Optional[int] = """single_label_classification"""
else:
A_ : Tuple = """multi_label_classification"""
if self.config.problem_type == "regression":
A_ : Optional[int] = MSELoss()
if self.num_labels == 1:
A_ : List[str] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A_ : Tuple = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
A_ : Tuple = CrossEntropyLoss()
A_ : Tuple = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : List[Any] = BCEWithLogitsLoss()
A_ : Optional[Any] = loss_fct(_a ,_a )
if not return_dict:
A_ : List[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , __SCREAMING_SNAKE_CASE , )
class ResNetBackbone( ResNetPreTrainedModel , BackboneMixin ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
super().__init__(_a )
super()._init_backbone(_a )
A_ : List[str] = [config.embedding_size] + config.hidden_sizes
A_ : Tuple = ResNetEmbeddings(_a )
A_ : Optional[Any] = ResNetEncoder(_a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@replace_return_docstrings(output_type=_a ,config_class=_CONFIG_FOR_DOC )
def _a ( self : str ,_a : Any ,_a : List[Any] = None ,_a : Optional[Any] = None ):
'''simple docstring'''
A_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : List[Any] = self.embedder(_a )
A_ : List[str] = self.encoder(_a ,output_hidden_states=_a ,return_dict=_a )
A_ : Optional[int] = outputs.hidden_states
A_ : int = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A_ : Tuple = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_a ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=_a ,)
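# Example (added; not in the original file): in the transformers library this
# backbone is commonly reached through the Auto API, e.g.
#   AutoBackbone.from_pretrained("microsoft/resnet-50", out_features=["stage4"])
# (sketch only; the method bodies in this extract still use obfuscated names).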
| 702
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( a__) -> float:
    """
    Return the arithmetic mean of a list of numbers.
    >>> lowerCamelCase__([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not a__:
        raise ValueError('List is empty')
    return sum(a__) / len(a__)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 517
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class SCREAMING_SNAKE_CASE ( CLIPTokenizer ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        """simple docstring"""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        """simple docstring"""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}; keep placeholder tokens independent""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        """simple docstring"""
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """simple docstring"""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
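# A hedged usage sketch (added; not in the original file). The class above maps
# one placeholder word onto several learned embedding slots; loading assumes
# network access to the public "openai/clip-vit-base-patch32" checkpoint.
if __name__ == "__main__":
    tokenizer = SCREAMING_SNAKE_CASE.from_pretrained("openai/clip-vit-base-patch32" )
    tokenizer.add_placeholder_tokens("<cat-toy>" , num_vec_per_token=4 )
    print(tokenizer("a photo of <cat-toy>" ).input_ids )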
| 517
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "git_vision_model"
    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , )-> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs )-> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs , )-> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self )-> dict:
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
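# A short usage sketch (added; not in the original file): build a GIT config
# with a smaller vision tower and read the nested value back.
if __name__ == "__main__":
    config = GitConfig(vision_config={"""hidden_size""": 5_12} )
    print(config.vision_config.hidden_size )  # 512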
| 517
|
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_SCREAMING_SNAKE_CASE = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
_SCREAMING_SNAKE_CASE = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys( s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F'''{key} -> {new_key}''' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download( url : str , root : str = "." ) -> bytes:
    # default for `root` added so the single-argument call below keeps working
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=10_24 ) as loop:
            while True:
                buffer = source.read(81_92 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ) -> None:
    if ".pt" not in checkpoint_path:
        import io
        # _download returns raw bytes, so deserialize them here
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path] ) ) , map_location="""cpu""" )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            F''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
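# Example invocation (added; assumes this script is saved as
# convert_openai_whisper_to_tfms.py, which is not stated in this extract):
#   python convert_openai_whisper_to_tfms.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny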
| 517
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(lowerCAmelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Dict ):
"""simple docstring"""
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
        token = '''<pad>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
        self.assertEqual(len(vocab_keys ) , 3_0_0_0_0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
        tokenizer = AlbertTokenizer(lowerCAmelCase__ , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [4_8, 2_5, 2_1, 1_2_8_9] )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
        tokenizer = AlbertTokenizer(lowerCAmelCase__ )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 83
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def snake_case_ ( A_ : Dict, A_ : bool = True, A_ : float = math.inf, A_ : float = -math.inf, A_ : float = math.inf, A_ : float = -math.inf, A_ : bool = False, A_ : float = 1_00, A_ : float = 0.01, A_ : float = 1, ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = search_prob
_lowerCamelCase : str = start_temperate
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : int = 0
_lowerCamelCase : Any = None
while not search_end:
_lowerCamelCase : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
_lowerCamelCase : Tuple = current_state
scores.append(A_ )
iterations += 1
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_lowerCamelCase : List[Any] = random.randint(0, len(A_ ) - 1 ) # picking a random neighbor
_lowerCamelCase : Dict = neighbors.pop(A_ )
_lowerCamelCase : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_lowerCamelCase : str = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_lowerCamelCase : Optional[Any] = picked_neighbor
else:
_lowerCamelCase : Optional[int] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_lowerCamelCase : Union[str, Any] = picked_neighbor
_lowerCamelCase : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_lowerCamelCase : Tuple = True
else:
_lowerCamelCase : Optional[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ), A_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def snake_case_ ( A_ : int, A_ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def snake_case_ ( A_ : Optional[int], A_ : List[Any] ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
F"""{local_min.score()}"""
)
| 83
| 1
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''')
@patch('''builtins.open''')
def lowerCAmelCase__ ( file : Tuple ,sock : List[Any]):
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='''mytext.txt''' ,testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 703
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any =logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type ="""encoder-decoder"""
    is_composition =True
    def __init__(self ,**kwargs ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(cls ,encoder_config ,decoder_config ,**kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**kwargs )
    def to_dict(self ) -> dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
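# A short usage sketch (added; not in the original file): pair two default BERT
# configurations into a single encoder-decoder configuration.
if __name__ == "__main__":
    from transformers import BertConfig
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig() , BertConfig() )
    print(cfg.decoder.is_decoder )  # True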
| 90
| 0
|
'''simple docstring'''
from math import pi, sqrt, tan
def __UpperCAmelCase ( A : float ) -> float:
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def __UpperCAmelCase ( A : float , A : float , A : float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __UpperCAmelCase ( A : float ) -> float:
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def __UpperCAmelCase ( A : float ) -> float:
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def __UpperCAmelCase ( A : float , A : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __UpperCAmelCase ( A : float , A : float , A : float ) -> float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
UpperCAmelCase_ : Optional[int] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __UpperCAmelCase ( A : float , A : float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def __UpperCAmelCase ( A : float , A : float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__UpperCAmelCase , 2 ) * torus_radius * tube_radius
def __UpperCAmelCase ( A : float , A : float ) -> float:
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def __UpperCAmelCase ( A : float ) -> float:
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def __UpperCAmelCase ( A : float , A : float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def __UpperCAmelCase ( A : float , A : float , A : float ) -> float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
UpperCAmelCase_ : List[str] = (sidea + sidea + sidea) / 2
UpperCAmelCase_ : Dict = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __UpperCAmelCase ( A : float , A : float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def __UpperCAmelCase ( A : float , A : float , A : float ) -> float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def __UpperCAmelCase ( A : float ) -> float:
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def __UpperCAmelCase ( A : float , A : float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def __UpperCAmelCase ( A : float , A : float ) -> float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
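    # A regular n-gon splits into n isosceles triangles of base `length` whose
    # apothem is length / (2 * tan(pi / n)), giving n * length**2 / (4 * tan(pi / n)).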
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print('\nSurface Areas of various geometric shapes: \n')
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 541
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
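# `_LazyModule` stands in for this module in `sys.modules`, so the framework
# imports listed in `_import_structure` only run on first attribute access.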
| 488
| 0
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
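        # Sherman-Morrison: given A^(-1) (here, `self`),
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).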
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 278
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 65_536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
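    # NOTE: zipping the two key lists assumes both state dicts enumerate their
    # parameters in the same order; the renaming loop below relies on that.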
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 65_536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 278
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 645
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
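# e.g. a ParlAI key like "encoder.layers.0.attention.q_lin.weight" becomes
# "encoder.layers.0.self_attn.q_proj.weight" via PATTERNS plus the rules above.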
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 641
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 594
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
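# `replicate` copies the pipeline params to every local device and `shard`
# splits the batch along a new leading device axis for pmap-style execution.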
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 594
| 1
|
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _A ( ):
"""simple docstring"""
raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
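
        # Each simulated OOM halves the batch size: 128 -> 64 -> 32 -> 16 -> 8.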
        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
@require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 61
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 669
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
        return 12
@property
    def num_embeds_ada_norm(self):
        return 12
@property
    def text_embedder_hidden_size(self):
        return 32
@property
    def dummy_vqvae(self):
torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
torch.manual_seed(0 )
        height = 12
        width = 12

        model_kwargs = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
        model = Transformer2DModel(**model_kwargs)
return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 713
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**snake_case__ )
return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(sample, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(sample, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 376
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = CustomTokenizer(a__ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizerFast.from_pretrained(a__ )
bert_tokenizer.save_pretrained(a__ )
snake_case_ = CustomTokenizerFast.from_pretrained(a__ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case_ = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=a__ , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class TrieTest(unittest.TestCase):  # test names reconstructed; the original identifiers were garbled
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even with inconsistent offsets, the output parts must reassemble the input.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
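# Illustrative sketch (added, not from the original file): the split semantics the
# cases above rely on, assuming `Trie` is the one imported from
# `transformers.tokenization_utils` as in this test module.
def _trie_split_demo() -> None:
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_100")
    # Added tokens act as separators; the plain text between them is preserved.
    assert trie.split("[CLS] hi extra_id_100") == ["[CLS]", " hi ", "extra_id_100"]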
| 400
| 1
|
from ....utils import logging
A_ = logging.get_logger(__name__)
class MMBTConfig:  # class name reconstructed; the original identifier was garbled
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        """Wrap an existing text config and record the modal encoder's hidden size."""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
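# Usage sketch (added for illustration): `base_config` is a placeholder for any
# existing transformers config instance, not a name defined in this module.
# mmbt_config = MMBTConfig(base_config, num_labels=2, modal_hidden_size=2048)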
| 704
|
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function element-wise for the given variance.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Cut out the kernel_size x kernel_size window centred on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build the spatial Gaussian kernel from each cell's distance to the centre.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
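# Usage sketch (added for illustration): running the filter on a synthetic
# grayscale array so the example needs no image file or GUI.
def _bilateral_demo() -> None:
    rng = np.random.default_rng(0)
    demo = rng.random((32, 32)).astype("float32")
    smoothed = bilateral_filter(demo, 1.0, 1.0, kernel_size=5)
    print(smoothed.shape)  # (32, 32); the border band stays zero by construction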
| 479
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__snake_case :int ={
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case :Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 106
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
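# Usage sketch (added for illustration): in 🤗 Datasets this formatter is what
# `dataset.with_format("jax")` installs; indexing then returns jax arrays.
# ds = load_dataset("glue", "mrpc", split="train").with_format("jax")
# ds[0]  # -> mapping of jax.Array / plain Python values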
| 591
|
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a concatenation of
    one or more strings from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
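# Usage sketch (added for illustration), mirroring classic word-break cases.
def _word_break_demo() -> None:
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False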
if __name__ == "__main__":
import doctest
doctest.testmod()
| 591
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : List[str] = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 303
|
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Array-backed binary min-heap with a position map for O(log n) update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
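# Usage sketch (added for illustration): a three-node graph whose MST keeps the
# edges a-b (3) and a-c (5) and drops b-c (10).
def _prims_demo() -> None:
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "a", 5)
    dist, parent = prims_algo(graph)
    print(dist)    # cheapest edge weight used to attach each node
    print(parent)  # MST parent links, None for the start node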
| 303
| 1
|
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):  # class name reconstructed from context
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
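# Hypothetical usage sketch (added): `unet`, `scheduler`, and `pil_image` are
# placeholders for objects created elsewhere; this only shows how the call
# signature above fits together.
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
# images, timestep = pipe(image=pil_image, strength=0.6, return_dict=False)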
| 715
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Optional[int] = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77
| 0
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Check whether side lengths `nums` can form a polygon: the longest side
    must be strictly shorter than the sum of all the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
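# Usage sketch (added for illustration).
def _check_polygon_demo() -> None:
    assert check_polygon([6, 10, 5]) is True  # a valid triangle
    assert check_polygon([3, 7, 13, 2]) is False  # 13 >= 3 + 7 + 2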
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Flip two qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 425
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case : int = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__snake_case : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=a ):
__UpperCAmelCase : int = ["""torch""", """scipy"""]
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __lowerCAmelCase ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 691
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =BlipImageProcessor()
_SCREAMING_SNAKE_CASE =GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
_SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(_A , _A , _A )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).tokenizer
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).image_processor
def UpperCamelCase_ ( self , **_A ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_A ).qformer_tokenizer
def UpperCamelCase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
self.assertIsInstance(processor.qformer_tokenizer , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_A , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
_SCREAMING_SNAKE_CASE ="""lower newer"""
_SCREAMING_SNAKE_CASE =processor(text=_A )
_SCREAMING_SNAKE_CASE =tokenizer(_A , return_token_type_ids=_A )
_SCREAMING_SNAKE_CASE =qformer_tokenizer(_A , return_token_type_ids=_A )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
_SCREAMING_SNAKE_CASE ="""lower newer"""
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_A , images=_A )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_A )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_qformer_tokenizer()
_SCREAMING_SNAKE_CASE =InstructBlipProcessor(
tokenizer=_A , image_processor=_A , qformer_tokenizer=_A )
_SCREAMING_SNAKE_CASE ="""lower newer"""
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_A , images=_A )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 255
|
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Any = UNetaDModel
__snake_case : str = """sample"""
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : str = 4
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Tuple = (32, 32)
lowerCAmelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : List[str] ):
return (3, 32, 32)
@property
def A ( self : List[Any] ):
return (3, 32, 32)
def A ( self : Optional[int] ):
lowerCAmelCase_ : Any = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
lowerCAmelCase_ : Dict = self.dummy_input
return init_dict, inputs_dict
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Dict = UNetaDModel
__snake_case : List[str] = """sample"""
@property
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = 4
lowerCAmelCase_ : Dict = 4
lowerCAmelCase_ : Optional[Any] = (32, 32)
lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : List[str] = torch.tensor([10] ).to(UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Any ):
return (4, 32, 32)
@property
def A ( self : str ):
return (4, 32, 32)
def A ( self : List[str] ):
lowerCAmelCase_ : str = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
lowerCAmelCase_ : Dict = self.dummy_input
return init_dict, inputs_dict
def A ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def A ( self : List[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def A ( self : List[str] ):
        # by default, model loading will use accelerate as `low_cpu_mem_usage=True`
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase )
model_accelerate.to(UpperCAmelCase )
model_accelerate.eval()
lowerCAmelCase_ : int = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase_ : Dict = noise.to(UpperCAmelCase )
lowerCAmelCase_ : int = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
lowerCAmelCase_ : List[str] = model_accelerate(UpperCAmelCase , UpperCAmelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase_ , lowerCAmelCase_ : int = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
model_normal_load.to(UpperCAmelCase )
model_normal_load.eval()
lowerCAmelCase_ : List[str] = model_normal_load(UpperCAmelCase , UpperCAmelCase )["""sample"""]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 )
def A ( self : Tuple ):
lowerCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase_ : List[str] = noise.to(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(UpperCAmelCase , UpperCAmelCase ).sample
lowerCAmelCase_ : str = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase_ : Any = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 ) )
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : int = UNetaDModel
__snake_case : Any = """sample"""
@property
def A ( self : Optional[int] , UpperCAmelCase : Optional[int]=(32, 32) ):
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : str = 3
lowerCAmelCase_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : str = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Optional[int] ):
return (3, 32, 32)
@property
def A ( self : Tuple ):
return (3, 32, 32)
def A ( self : Tuple ):
lowerCAmelCase_ : str = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowerCAmelCase_ : Any = self.dummy_input
return init_dict, inputs_dict
@slow
def A ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Any = self.dummy_input
lowerCAmelCase_ : Tuple = floats_tensor((4, 3) + (2_56, 2_56) ).to(UpperCAmelCase )
lowerCAmelCase_ : Dict = noise
lowerCAmelCase_ : str = model(**UpperCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(UpperCAmelCase )
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Union[str, Any] = (2_56, 2_56)
lowerCAmelCase_ : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase , UpperCAmelCase ).sample
lowerCAmelCase_ : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase_ : List[Any] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(UpperCAmelCase )
lowerCAmelCase_ : str = 4
lowerCAmelCase_ : Optional[int] = 3
lowerCAmelCase_ : Tuple = (32, 32)
lowerCAmelCase_ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : int = model(UpperCAmelCase , UpperCAmelCase ).sample
lowerCAmelCase_ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase_ : Optional[Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
def A ( self : int ):
# not required for this model
pass
| 600
| 0
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( A__ , unittest.TestCase ):
UpperCamelCase__ = CodeGenTokenizer
UpperCamelCase__ = CodeGenTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = {'''add_prefix_space''': True}
UpperCamelCase__ = False
def snake_case_ ( self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
A__ = dict(zip(a__ , range(len(a__))))
A__ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
A__ = {'''unk_token''': '''<unk>'''}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(a__) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(a__))
def snake_case_ ( self , **a__):
kwargs.update(self.special_tokens_map)
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__)
def snake_case_ ( self , **a__):
kwargs.update(self.special_tokens_map)
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__)
def snake_case_ ( self , a__):
A__ = '''lower newer'''
A__ = '''lower newer'''
return input_text, output_text
def snake_case_ ( self):
A__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
A__ = '''lower newer'''
A__ = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
A__ = tokenizer.tokenize(a__ , add_prefix_space=a__)
self.assertListEqual(a__ , a__)
A__ = tokens + [tokenizer.unk_token]
A__ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__) , a__)
def snake_case_ ( self):
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer(add_prefix_space=a__)
A__ = '''lower newer'''
# Testing tokenization
A__ = tokenizer.tokenize(a__ , add_prefix_space=a__)
A__ = rust_tokenizer.tokenize(a__)
self.assertListEqual(a__ , a__)
# Testing conversion to ids without special tokens
A__ = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__)
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__)
self.assertListEqual(a__ , a__)
# Testing conversion to ids with special tokens
A__ = self.get_rust_tokenizer(add_prefix_space=a__)
A__ = tokenizer.encode(a__ , add_prefix_space=a__)
A__ = rust_tokenizer.encode(a__)
self.assertListEqual(a__ , a__)
# Testing the unknown token
A__ = tokens + [rust_tokenizer.unk_token]
A__ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__) , a__)
def snake_case_ ( self , *a__ , **a__):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case_ ( self , a__=1_5):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
A__ = self.rust_tokenizer_class.from_pretrained(a__ , **a__)
# Simple input
A__ = '''This is a simple input'''
A__ = ['''This is a simple input 1''', '''This is a simple input 2''']
A__ = ('''This is a simple input''', '''This is a pair''')
A__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''')
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''')
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''')
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''')
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
def snake_case_ ( self):
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''')
# Simple input
A__ = '''This is a simple input'''
A__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
A__ = ('''This is a simple input''', '''This is a pair''')
A__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(a__ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''')
A__ = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''')
A__ = tokenizer(*a__ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''')
A__ = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''')
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0)
self.assertTrue(pad_token_id in out_s['''input_ids'''])
self.assertTrue(0 in out_s['''attention_mask'''])
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0])
self.assertFalse(0 in out_sa['''attention_mask'''][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1])
self.assertTrue(0 in out_sa['''attention_mask'''][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0)
self.assertTrue(pad_token_id in out_p['''input_ids'''])
self.assertTrue(0 in out_p['''attention_mask'''])
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0])
self.assertFalse(0 in out_pa['''attention_mask'''][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1])
self.assertTrue(0 in out_pa['''attention_mask'''][1])
def snake_case_ ( self):
A__ = '''$$$'''
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__)
A__ = '''This is a simple input'''
A__ = ['''This is a simple input 1''', '''This is a simple input 2''']
A__ = tokenizer.bos_token_id
A__ = tokenizer(a__)
A__ = tokenizer(a__)
self.assertEqual(out_s.input_ids[0] , a__)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
A__ = tokenizer.decode(out_s.input_ids)
A__ = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , a__)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
@slow
def snake_case_ ( self):
A__ = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''')
A__ = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
A__ = '''\nif len_a > len_b: result = a\nelse: result = b'''
A__ = tokenizer.encode(a__)
A__ = ['''^#''', re.escape('''<|endoftext|>'''), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
A__ = tokenizer.decode(a__ , truncate_before_pattern=a__)
self.assertEqual(a__ , a__)
def snake_case_ ( self):
pass
| 526
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526
| 1
|
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d <= `digit` for which 1/d has the longest recurring
    cycle in its decimal fraction part (Project Euler problem 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 182
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
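# Illustrative example (not part of the original script): after `rename_keys`
# below, a T5X path such as "encoder/layers_0/attention/key/kernel" becomes
# "encoder/block/0/layer/0/SelfAttention/k/kernel".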
def rename_keys(s_dict):
    """Rename flattened T5X parameter paths to the HF SwitchTransformers layout."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one entry per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
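# Minimal usage sketch (illustrative toy input, not part of the original script):
#
#     toy = {"encoder/layers_0/attention/key/kernel": some_array}
#     toy = rename_keys(toy)
#     # toy now holds the key "encoder/block/0/layer/0/SelfAttention/k/kernel"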
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Build a SwitchTransformersConfig from a T5X gin config file."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
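# For reference, a gin line such as `NUM_HEADS = 12` is parsed by the regex
# above and, via GIN_TO_CONFIG_MAPPING, becomes the config kwarg `num_heads=12`.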
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11
| 0
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""")
def UpperCamelCase ( self : str)-> int:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)])
def UpperCamelCase ( self : Dict , snake_case_ : List[str])-> Tuple:
__lowerCAmelCase , __lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase =ids_tensor([1, 10] , config.vocab_size)
__lowerCAmelCase =ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase =OpenLlamaModel(snake_case_)
original_model.to(snake_case_)
original_model.eval()
__lowerCAmelCase =original_model(snake_case_).last_hidden_state
__lowerCAmelCase =original_model(snake_case_).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
__lowerCAmelCase ={"""type""": scaling_type, """factor""": 1_0.0}
__lowerCAmelCase =OpenLlamaModel(snake_case_)
scaled_model.to(snake_case_)
scaled_model.eval()
__lowerCAmelCase =scaled_model(snake_case_).last_hidden_state
__lowerCAmelCase =scaled_model(snake_case_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1e-5))
| 456
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
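# Example behaviour (illustrative): is_balanced("([]{})") -> True,
# is_balanced("(]") -> False, is_balanced("((") -> False.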
def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 456
| 1
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
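# Example (illustrative): prime_factors(868) -> [2, 2, 7, 31], since 868 = 2 * 2 * 7 * 31.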
if __name__ == "__main__":
import doctest
doctest.testmod()
| 168
|
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort `arr` with strand sort; set `reverse=True` for descending order."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
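# Strand sort repeatedly peels an already-sorted "strand" (the sublist above)
# off the input and merges it into the solution; the recursive call continues
# until the input list is exhausted.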
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 168
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
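# Note: the original checkpoint stores query/key/value as one fused 768x256
# `in_proj` matrix (hidden size 256), so the slices above carve out the three
# separate 256-dim projections that the HF implementation expects.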
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # assumption: base-model weights move under the `conditional_detr.model` prefix
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 365
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 365
| 1
|
import requests
from bs4 import BeautifulSoup
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str = "AAPL" ):
"""simple docstring"""
a_ : Optional[Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
a_ : Optional[int] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , """html.parser""" )
a_ : Tuple = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
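# Illustrative usage (network-dependent; the scraped value varies over time):
#
#     print(stock_price("AAPL"))  # prints the current quote as a string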
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 419
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """Return the two roots of ax^2 + bx + c = 0 (as reals when the imaginary part is zero)."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 419
| 1
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : List[str] = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_24,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_40,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 14_08,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_60,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 15_36,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_00,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 17_92,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_80,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 20_48,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_56,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 23_04,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_28,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 25_60,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_00,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    # map each original block id (e.g. "1a") to a sequential HF block index
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # assumption: the Keras classification head layer is named "predictions"
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original Keras weights to our EfficientNet structure.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 644
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644
| 1
|
import re
def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )]
def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_ )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text )
def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '''_''' )
def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 130
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> int:
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu_10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu_10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )
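        # `gelu_10` is a clipped GELU: it matches the plain GELU below the clip
        # value and saturates at 10.0, which is exactly what the two assertions
        # above verify.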
def lowercase_ ( self ) -> Any:
'''simple docstring'''
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(lowerCamelCase__ ):
get_activation('bogus' )
with self.assertRaises(lowerCamelCase__ ):
get_activation(lowerCamelCase__ )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        # If instances are distinct, setting an attribute on one should not affect the other
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 469
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    '''
    Helper function to read an audio file through ffmpeg.
    '''
    ar = f'''{sampling_rate}'''
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]

    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
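# Illustrative usage (assumes a local audio file; not part of the original module):
#
#     with open("sample.mp3", "rb") as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16000)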
def _a ( lowercase__ : int , lowercase__ : float , lowercase__ : str = "f32le" , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = f'''{sampling_rate}'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE__ : List[str] = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
SCREAMING_SNAKE_CASE__ : List[str] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE__ : Any = 'alsa'
SCREAMING_SNAKE_CASE__ : int = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE__ : str = 'avfoundation'
SCREAMING_SNAKE_CASE__ : Dict = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE__ : Any = 'dshow'
SCREAMING_SNAKE_CASE__ : Any = 'default'
SCREAMING_SNAKE_CASE__ : List[str] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE__ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE__ : List[Any] = _ffmpeg_stream(lowercase__ , lowercase__ )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yields overlapping microphone chunks (with strides) ready for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunks raw bytes from `iterator` into exactly `chunk_len` pieces with left/right strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: reads raw bytes from an ffmpeg subprocess in `buflen`-sized reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
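
# --- Added: hedged usage sketch for ffmpeg_read. The wav path is illustrative only; any
# container/codec that the local ffmpeg binary understands should decode the same way.
if __name__ == "__main__":
    with open("sample.wav", "rb") as f:  # hypothetical input file
        audio = ffmpeg_read(f.read(), sampling_rate=16000)
    print(audio.dtype, audio.shape)  # float32 mono PCM at 16 kHz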
| 636
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Builds batched waveform inputs, optionally equal-length and/or as numpy arrays."""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Builds batched spectrogram targets of shape (seq_len, num_mel_bins)."""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Tuple = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : str = feat_extract(a_ , padding=a_ , max_length=a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : List[Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = range(800 , 1400 , 200 )
SCREAMING_SNAKE_CASE__ : int = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ : int = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(a_ , max_length=a_ , padding=a_ )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __lowercase( self : int )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract(
a_ , truncation=a_ , max_length=1000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : str = feat_extract(
a_ , truncation=a_ , max_length=2000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def __lowercase( self : Any )-> Optional[int]:
"""simple docstring"""
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(audio_target=a_ , padding=a_ , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : int = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = feature_extractor(a_ , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE__ : str = feature_extractor(a_ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a_ , a_ ):
self.assertTrue(np.allclose(a_ , a_ , atol=1e-3 ) )
def __lowercase( self : Dict )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ : str = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : int = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : List[Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a_ )
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __lowercase( self : Tuple )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : Dict = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : str = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Any = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Optional[int] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def __lowercase( self : str )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.feature_extraction_class(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ : Tuple = [len(a_ ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ : Dict = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ : str = min(a_ )
SCREAMING_SNAKE_CASE__ : Any = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ : int = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        """Checks extracted waveform values against reference values."""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        """Checks extracted mel-spectrogram target values against reference values."""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
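
# --- Added: hedged usage sketch (commented out because this module belongs to the
# transformers test suite and uses relative imports). Outside the suite the extractor
# can be exercised directly; the random waveform below is illustrative only.
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake mono audio
#   features = extractor(waveform, sampling_rate=16000, return_tensors="np")
#   print(features.input_values.shape)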
| 636
| 1
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes edge weights distinct, as Boruvka's algorithm requires."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set structure with path compression and union by rank."""

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
def boruvka_mst(graph):
    """Returns the minimum spanning tree of `graph` using Boruvka's algorithm."""
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
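
# --- Added: hedged usage sketch for Boruvka's MST; the tiny graph below is illustrative only.
if __name__ == "__main__":
    g = Graph.build(edges=[["a", "b", 1], ["a", "c", 4], ["b", "c", 2]])
    g.distinct_weight()  # Boruvka assumes distinct edge weights
    print(boruvka_mst(g))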
| 691
|
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Returns a dict mapping worldometers headline labels to their current values."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 691
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walks `key` through the hf model and copies `value` into the target tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Maps every fairseq tensor onto its transformers counterpart via MAPPING."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copies a fairseq conv-layer tensor into the hf feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copies/pastes/tweaks a fairseq UniSpeechSAT checkpoint into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 706
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    """Renames SAM checkpoint keys to the transformers layout."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Converts an original SAM checkpoint and sanity-checks IoU scores on a reference image."""
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23])
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31])
        config = SamConfig(vision_config=vision_config)
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 485
| 0
|
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Chooses a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Returns the k-th smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
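    # Added: tiny illustrative check (values arbitrary); the 3rd smallest element is 3.
    print(kth_number([2, 1, 3, 4, 5], 3))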
| 219
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamic import to avoid a circular dependency
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
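
# --- Added: hedged usage sketch (commented out; this module uses relative imports and is
# meant to be imported from the transformers package). "facebook/rag-token-nq" is a public
# checkpoint that ships both sub-tokenizers; network access is required.
#
#   from transformers import RagTokenizer
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")
#   print(batch["input_ids"].shape)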
| 219
| 1
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drops fairseq bookkeeping keys that have no transformers counterpart."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    """Renames fairseq parameter names to the transformers layout."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    """Builds an nn.Linear whose weights are tied to the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Converts a fairseq Speech2Text checkpoint into the transformers format."""
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
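# Added: example invocation (the script name and paths are illustrative only):
#   python convert_speech_to_text.py --fairseq_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t_model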
| 587
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """Pix2Struct patch extraction relies on torch.nn.functional.unfold behaviour from torch>=1.11."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extracts non-overlapping patches of shape (patch_height, patch_width) from an image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Renders `text` onto a padded PIL image, used for VQA header prompts."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Renders a text header above `image` and returns the combined numpy image."""
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    r"""Image processor that flattens images into patch sequences for Pix2Struct."""

    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs):
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None, max_patches=None, patch_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are flattened patches.")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
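
# A hedged usage sketch for the processor class above (a sketch under the
# assumption that the default 16x16 patch size and max_patches=2048 are kept):
#
#   from PIL import Image
#   processor = __A()
#   encoded = processor.preprocess(Image.new("RGB", (640, 480), "red"), return_tensors="np")
#   print(encoded["flattened_patches"].shape)  # (1, 2048, 2 + 16 * 16 * 3)
#   print(encoded["attention_mask"].shape)     # (1, 2048)
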
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
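
# Hedged usage note: run the script from the root of a transformers checkout
# (the exact script location is an assumption); each loop iteration writes
# model_cards/allenai/<model_name>/README.md:
#
#   python gen-card-allenai-wmt16.py
#   # Generating .../model_cards/allenai/wmt16-en-de-dist-12-1/README.md
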
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
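
# A minimal sketch (not part of the test file) of the denoising loop the tests
# above exercise; the random "model output" is an assumption standing in for a
# real network:
#
#   import torch
#   from diffusers import UnCLIPScheduler
#
#   scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = torch.randn_like(sample)  # placeholder model output
#       sample = scheduler.step(residual, t, sample).prev_sample
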
from ..utils import DummyObject, requires_backends
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
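
# A short sketch of how these placeholders behave (assumption: the environment
# lacks `flax`): importing the class object succeeds, but any instantiation or
# factory call raises an informative ImportError via requires_backends.
#
#   try:
#       FlaxPNDMScheduler()
#   except ImportError as err:
#       print(err)  # tells the user which backend to install
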
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
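
# A brief sketch of the lazy-import pattern above: the module object is swapped
# for a `_LazyModule`, so heavy submodules load only on first attribute access.
#
#   import transformers.models.reformer as reformer  # cheap: no torch import yet
#   config = reformer.ReformerConfig()               # triggers the real submodule import
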