| code (string, lengths 82 to 53.2k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'vivit'
def __init__( self , __a=2_24 , __a=32 , __a=[2, 16, 16] , __a=3 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu_fast" , __a=0.0 , __a=0.0 , __a=0.02 , __a=1e-06 , __a=True , **__a , ) -> int:
'''simple docstring'''
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = image_size
_UpperCamelCase = num_frames
_UpperCamelCase = tubelet_size
_UpperCamelCase = num_channels
_UpperCamelCase = qkv_bias
super().__init__(**__a)
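# Hedged usage sketch, not part of the original row: in upstream transformers this class
# appears to correspond to `VivitConfig` (model_type 'vivit'), with the positional defaults
# above mapping to image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3,
# hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072:
#     from transformers import VivitConfig, VivitModel
#     config = VivitConfig(image_size=224, num_frames=32)
#     model = VivitModel(config)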
| 19
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Any = 'T5Config'
def a__ ( lowercase : jnp.array, lowercase : int, lowercase : int ) -> jnp.ndarray:
"""simple docstring"""
_UpperCamelCase = jnp.zeros_like(lowercase )
_UpperCamelCase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
_UpperCamelCase = shifted_input_ids.at[:, 0].set(lowercase )
_UpperCamelCase = jnp.where(shifted_input_ids == -100, lowercase, lowercase )
return shifted_input_ids
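# Hedged worked example of the helper above. Assumption: it mirrors the Flax T5
# `shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id)` helper
# (the parameter names are not recoverable from the obfuscated signature):
#     shift_tokens_right(jnp.array([[5, 6, -100]]), 0, 2)
#     -> [[2, 5, 6]]  (prepend the decoder start id, drop the last token,
#        and replace any label-masking value of -100 with the pad id)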
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[Any] = 'mt5'
_snake_case : Union[str, Any] = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Tuple = 'mt5'
_snake_case : int = MTaConfig
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = 'mt5'
_snake_case : Optional[Any] = MTaConfig
| 98
| 0
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 465
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
_A : Union[str, Any] = ['''pixel_values''']
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 255 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = size if size is not None else {"shortest_edge": 384}
SCREAMING_SNAKE_CASE:List[str] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = do_resize
SCREAMING_SNAKE_CASE:List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE:Dict = crop_pct if crop_pct is not None else 224 / 256
SCREAMING_SNAKE_CASE:Any = resample
SCREAMING_SNAKE_CASE:Dict = do_rescale
SCREAMING_SNAKE_CASE:Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE:Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE:Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE:Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Dict[str, int] ,SCREAMING_SNAKE_CASE__ : float ,SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : int ,):
SCREAMING_SNAKE_CASE:List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE:Any = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE:Any = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE:Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE__ ,size=(shortest_edge, shortest_edge) ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE__ ,size=(shortest_edge, shortest_edge) ,resample=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
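# Hedged worked example of the resize logic above: with size={"shortest_edge": 224} and the
# default crop_pct of 224 / 256, the shortest edge is first resized to int(224 / (224 / 256)) = 256
# while keeping the aspect ratio, then the image is center-cropped to 224x224. For
# shortest_edge >= 384 the image is warped directly to (shortest_edge, shortest_edge) with no crop.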
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[int, float] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
return rescale(SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : np.ndarray ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Union[float, List[float]] ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None ,**SCREAMING_SNAKE_CASE__ : Union[str, Any] ,):
return normalize(SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ,data_format=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : ImageInput ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Dict[str, int] = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : PILImageResampling = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,SCREAMING_SNAKE_CASE__ : ChannelDimension = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE__ : List[Any] ,):
SCREAMING_SNAKE_CASE:Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE:List[str] = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE:Union[str, Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE:List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE:str = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE:str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE:Any = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE:Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE:Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE:Tuple = get_size_dict(SCREAMING_SNAKE_CASE__ ,default_to_square=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE:int = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE:Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ ,size=SCREAMING_SNAKE_CASE__ ,crop_pct=SCREAMING_SNAKE_CASE__ ,resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE:Union[str, Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ ,scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE:Optional[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ ,mean=SCREAMING_SNAKE_CASE__ ,std=SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE:Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE:int = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ ,tensor_type=SCREAMING_SNAKE_CASE__ )
| 465
| 1
|
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A = logging.get_logger(__name__)
A = Dict[str, Any]
A = List[Prediction]
@add_end_docstrings(__magic_name__ )
class a__ ( __magic_name__ ):
def __init__( self : Optional[int] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_)
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch.")
requires_backends(self , "vision")
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def a_ ( self : Dict , **UpperCamelCase_ : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = {}
if "threshold" in kwargs:
__UpperCAmelCase : Dict = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : str , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Tuple):
"""simple docstring"""
return super().__call__(*UpperCamelCase_ , **UpperCamelCase_)
def a_ ( self : int , UpperCamelCase_ : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = load_image(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = torch.IntTensor([[image.height, image.width]])
__UpperCAmelCase : str = self.image_processor(images=[image] , return_tensors="pt")
if self.tokenizer is not None:
__UpperCAmelCase : Any = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt")
__UpperCAmelCase : List[Any] = target_size
return inputs
def a_ ( self : Tuple , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = model_inputs.pop("target_size")
__UpperCAmelCase : Tuple = self.model(**UpperCamelCase_)
__UpperCAmelCase : int = outputs.__class__({"target_size": target_size, **outputs})
if self.tokenizer is not None:
__UpperCAmelCase : str = model_inputs["bbox"]
return model_outputs
def a_ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple=0.9):
"""simple docstring"""
__UpperCAmelCase : Dict = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__UpperCAmelCase , __UpperCAmelCase : str = target_size[0].tolist()
def unnormalize(UpperCamelCase_ : List[str]):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
]))
__UpperCAmelCase , __UpperCAmelCase : int = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
__UpperCAmelCase : List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__UpperCAmelCase : Optional[Any] = [unnormalize(UpperCamelCase_) for bbox in model_outputs["bbox"].squeeze(0)]
__UpperCAmelCase : Union[str, Any] = ["score", "label", "box"]
__UpperCAmelCase : Dict = [dict(zip(UpperCamelCase_ , UpperCamelCase_)) for vals in zip(scores.tolist() , UpperCamelCase_ , UpperCamelCase_) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__UpperCAmelCase : List[str] = self.image_processor.post_process_object_detection(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[int] = raw_annotations[0]
__UpperCAmelCase : Optional[int] = raw_annotation["scores"]
__UpperCAmelCase : Dict = raw_annotation["labels"]
__UpperCAmelCase : Tuple = raw_annotation["boxes"]
__UpperCAmelCase : List[Any] = scores.tolist()
__UpperCAmelCase : Any = [self.model.config.idalabel[label.item()] for label in labels]
__UpperCAmelCase : Tuple = [self._get_bounding_box(UpperCamelCase_) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__UpperCAmelCase : Union[str, Any] = ["score", "label", "box"]
__UpperCAmelCase : Optional[int] = [
dict(zip(UpperCamelCase_ , UpperCamelCase_))
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"])
]
return annotation
def a_ ( self : int , UpperCamelCase_ : "torch.Tensor"):
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = box.int().tolist()
__UpperCAmelCase : Dict = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
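# Hedged example of a single prediction emitted by this pipeline (keys come from the code
# above; the concrete values are illustrative only):
#     {"score": 0.98, "label": "cat", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 120}}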
| 77
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
snake_case__ : Union[str, Any] =old_name
if "patch_embed" in old_name:
snake_case__, snake_case__, snake_case__ : int =old_name.split('''.''' )
if layer == "0":
snake_case__ : Tuple =old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
snake_case__ : int =old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
snake_case__ : str =old_name.replace('''3''' , '''convolution2''' )
else:
snake_case__ : Tuple =old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] =R'''\b\d{2}\b'''
if bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
snake_case__ : Any =re.search(R'''\d\.\d\d.''' , SCREAMING_SNAKE_CASE ).group()
else:
snake_case__ : List[Any] =re.search(R'''\d\.\d.''' , SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
snake_case__ : int =old_name.replace(SCREAMING_SNAKE_CASE , '''''' )
snake_case__ : Tuple =trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
snake_case__ : Union[str, Any] ='''intermediate_stages.''' + trimmed_name
else:
snake_case__ : Optional[int] =old_name.replace(SCREAMING_SNAKE_CASE , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
snake_case__ : List[Any] =trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
snake_case__ : Optional[Any] =str(int(match[2] ) - num_meta4D_last_stage )
snake_case__ : List[Any] =trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
snake_case__ : Tuple =trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
snake_case__ : Union[str, Any] =trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
snake_case__ : str =trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
snake_case__ : Optional[Any] =trimmed_name.replace('''fc2''' , '''linear_out''' )
snake_case__ : Dict ='''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , SCREAMING_SNAKE_CASE ):
snake_case__ : int =old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
snake_case__ : Union[str, Any] =new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case__ : Any =new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case__ : Dict =new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
snake_case__ : List[Any] =new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
snake_case__ : Union[str, Any] =new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
snake_case__ : List[Any] =new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
snake_case__ : Union[str, Any] ='''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case__ : int =new_name.replace('''norm''' , '''layernorm''' )
snake_case__ : Dict ='''efficientformer.''' + new_name
else:
snake_case__ : List[Any] ='''efficientformer.encoder.''' + new_name
return new_name
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
for key in checkpoint.copy().keys():
snake_case__ : List[Any] =checkpoint.pop(SCREAMING_SNAKE_CASE )
snake_case__ : Any =val
return checkpoint
def lowercase_ ( ):
"""simple docstring"""
snake_case__ : int ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : Optional[int] =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
def lowercase_ ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool ):
"""simple docstring"""
snake_case__ : Union[str, Any] =torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
snake_case__ : int =EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE )
snake_case__ : Dict =EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] ='''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
snake_case__ : List[Any] =config.depths[-1] - config.num_metaad_blocks + 1
snake_case__ : Dict =convert_torch_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] ={
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
snake_case__ : Any =prepare_img()
snake_case__ : str =2_56
snake_case__ : List[Any] =2_24
snake_case__ : Any =EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
snake_case__ : int =processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
# original processing pipeline
snake_case__ : List[str] =Compose(
[
Resize(SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
] )
snake_case__ : Tuple =image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =model(SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =outputs.logits
snake_case__ : Optional[Any] =(1, 10_00)
if "l1" in model_name:
snake_case__ : Union[str, Any] =torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case__ : Optional[Any] =torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case__ : Dict =torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 381
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def a_ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = jnp.ones((batch_size, length) ) / length
return scores
def a_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : int = None
_UpperCAmelCase : int = 20
_UpperCAmelCase : List[str] = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase_ )
# tweak scores to not be uniform anymore
_UpperCAmelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCAmelCase : List[Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCAmelCase : int = jax.nn.softmax(UpperCAmelCase_ , axis=-1 )
_UpperCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCAmelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase_ , scores.copy() , cur_len=UpperCAmelCase_ ) , axis=-1 )
_UpperCAmelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase_ , scores.copy() , cur_len=UpperCAmelCase_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : int = 10
_UpperCAmelCase : Optional[Any] = 2
# create ramp distribution
_UpperCAmelCase : Any = np.broadcast_to(np.arange(UpperCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCAmelCase : Optional[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : int = top_k_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCAmelCase : List[Any] = 5
_UpperCAmelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCAmelCase : Optional[Any] = np.broadcast_to(np.arange(UpperCAmelCase_ )[None, :] , (batch_size, length) ).copy()
_UpperCAmelCase : int = top_k_warp_safety_check(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = None
_UpperCAmelCase : Union[str, Any] = 10
_UpperCAmelCase : Union[str, Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCAmelCase : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCAmelCase : List[Any] = FlaxTopPLogitsWarper(0.8 )
_UpperCAmelCase : List[str] = np.exp(top_p_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCAmelCase : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_UpperCAmelCase : Any = np.broadcast_to(np.arange(UpperCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCAmelCase : Optional[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCAmelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCAmelCase : str = top_p_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = 20
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : Any = 0
_UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase_ )
# check that min length is applied at length 5
_UpperCAmelCase : Tuple = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCAmelCase : Union[str, Any] = 5
_UpperCAmelCase : Dict = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : str = min_dist_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCAmelCase : Optional[int] = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Any = 15
_UpperCAmelCase : Dict = min_dist_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertFalse(jnp.isinf(UpperCAmelCase_ ).any() )
def a_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = 20
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase_ )
# check that all scores are -inf except the bos_token_id score
_UpperCAmelCase : List[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCAmelCase : str = 1
_UpperCAmelCase : List[str] = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Tuple = logits_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : Dict = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = logits_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertFalse(jnp.isinf(UpperCAmelCase_ ).any() )
def a_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 20
_UpperCAmelCase : List[Any] = 4
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Optional[int] = 5
_UpperCAmelCase : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCAmelCase : Union[str, Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Optional[int] = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[str] = logits_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCAmelCase : Optional[Any] = 3
_UpperCAmelCase : Union[str, Any] = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[Any] = logits_processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
self.assertFalse(jnp.isinf(UpperCAmelCase_ ).any() )
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Any = 4
_UpperCAmelCase : Dict = 10
_UpperCAmelCase : List[str] = 15
_UpperCAmelCase : int = 2
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : List[str] = 15
# dummy input_ids and scores
_UpperCAmelCase : List[str] = ids_tensor((batch_size, sequence_length) , UpperCAmelCase_ )
_UpperCAmelCase : int = input_ids.copy()
_UpperCAmelCase : Optional[Any] = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : Dict = scores.copy()
# instantiate all dist processors
_UpperCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCAmelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : str = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : str = 10
# no processor list
_UpperCAmelCase : Union[str, Any] = temp_dist_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : int = top_k_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : Optional[Any] = top_p_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : Any = min_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : List[Any] = bos_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : List[str] = eos_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
# with processor list
_UpperCAmelCase : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCAmelCase : str = processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = 4
_UpperCAmelCase : Optional[Any] = 10
_UpperCAmelCase : int = 15
_UpperCAmelCase : Optional[Any] = 2
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Dict = 15
# dummy input_ids and scores
_UpperCAmelCase : Any = ids_tensor((batch_size, sequence_length) , UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = input_ids.copy()
_UpperCAmelCase : str = self._get_uniform_logits(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : int = scores.copy()
# instantiate all dist processors
_UpperCAmelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCAmelCase : Any = FlaxTopKLogitsWarper(3 )
_UpperCAmelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCAmelCase : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
_UpperCAmelCase : int = 10
# no processor list
def run_no_processor_list(UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ):
_UpperCAmelCase : int = temp_dist_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : int = top_k_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : str = top_p_warp(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : Dict = min_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : Any = bos_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
_UpperCAmelCase : Dict = eos_dist_proc(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
return scores
# with processor list
def run_processor_list(UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str ):
_UpperCAmelCase : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCAmelCase : Tuple = processor(UpperCAmelCase_ , UpperCAmelCase_ , cur_len=UpperCAmelCase_ )
return scores
_UpperCAmelCase : Optional[Any] = jax.jit(UpperCAmelCase_ )
_UpperCAmelCase : str = jax.jit(UpperCAmelCase_ )
_UpperCAmelCase : Tuple = jitted_run_no_processor_list(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : List[str] = jitted_run_processor_list(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 416
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ : Optional[Any] = 'bart'
UpperCAmelCase__ : Optional[Any] = True
@st.cache(allow_output_mutation=_UpperCamelCase )
def _A ( ):
if LOAD_DENSE_INDEX:
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCAmelCase : Tuple = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCAmelCase : int = qar_model.eval()
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCAmelCase : str = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCAmelCase : Optional[Any] = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCAmelCase : Dict = sas_model.eval()
else:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCamelCase )
def _A ( ):
if LOAD_DENSE_INDEX:
_UpperCAmelCase : Optional[int] = faiss.StandardGpuResources()
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
_UpperCAmelCase : str = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
_UpperCAmelCase : Optional[Any] = faiss.IndexFlatIP(128 )
_UpperCAmelCase : str = faiss.index_cpu_to_gpu(_UpperCamelCase , 1 , _UpperCamelCase )
wikiaab_gpu_index_flat.add(_UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCAmelCase , _UpperCAmelCase : List[str] = (None, None)
_UpperCAmelCase : Dict = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCamelCase )
def _A ( ):
_UpperCAmelCase : int = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
_UpperCAmelCase : Dict = elia['''train_eli5''']
_UpperCAmelCase : Dict = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
_UpperCAmelCase : List[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCamelCase )
return (elia_train, eli5_train_q_index)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = load_indexes()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = load_models()
UpperCAmelCase__ , UpperCAmelCase__ : str = load_train_data()
def _A ( _UpperCamelCase , _UpperCamelCase=10 ):
_UpperCAmelCase : Union[str, Any] = embed_questions_for_retrieval([question] , _UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase : Tuple = eli5_train_q_index.search(_UpperCamelCase , _UpperCamelCase )
_UpperCAmelCase : Dict = [elia_train[int(_UpperCamelCase )] for i in I[0]]
return nn_examples
def _A ( _UpperCamelCase , _UpperCamelCase="wiki40b" , _UpperCamelCase="dense" , _UpperCamelCase=10 ):
if source == "none":
_UpperCAmelCase , _UpperCAmelCase : Tuple = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCAmelCase , _UpperCAmelCase : Dict = query_qa_dense_index(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = query_es_index(
_UpperCamelCase , _UpperCamelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCamelCase , )
_UpperCAmelCase : List[Any] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCAmelCase : Union[str, Any] = '''question: {} context: {}'''.format(_UpperCamelCase , _UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCamelCase : None),
} )
def _A ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=64 , _UpperCamelCase=256 , _UpperCamelCase=False , _UpperCamelCase=2 , _UpperCamelCase=0.95 , _UpperCamelCase=0.8 ):
with torch.no_grad():
_UpperCAmelCase : Dict = qa_sas_generate(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , num_answers=1 , num_beams=_UpperCamelCase , min_len=_UpperCamelCase , max_len=_UpperCamelCase , do_sample=_UpperCamelCase , temp=_UpperCamelCase , top_p=_UpperCamelCase , top_k=_UpperCamelCase , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
UpperCAmelCase__ : Tuple = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
UpperCAmelCase__ : Dict = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : Optional[Any] = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : Optional[Any] = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
UpperCAmelCase__ : List[Any] = st.sidebar.checkbox('Demo options')
if demo_options:
UpperCAmelCase__ : int = st.sidebar.selectbox(
'',
action_list,
index=3,
)
UpperCAmelCase__ : List[str] = action_list.index(action_st)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
UpperCAmelCase__ : Union[str, Any] = show_type == 'Show full text of passages'
else:
UpperCAmelCase__ : int = 3
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Any = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
UpperCAmelCase__ : str = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : List[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
UpperCAmelCase__ : int = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
UpperCAmelCase__ : Optional[Any] = 'wiki40b'
UpperCAmelCase__ : str = 'dense'
UpperCAmelCase__ : Optional[Any] = 'beam'
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : Tuple = 64
UpperCAmelCase__ : Any = 256
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Dict = st.sidebar.checkbox('Generation options')
if generate_options:
UpperCAmelCase__ : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
UpperCAmelCase__ : List[str] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : List[str] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Dict = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : Any = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Any = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = None
# start main text
UpperCAmelCase__ : Tuple = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
UpperCAmelCase__ : Dict = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : List[str] = st.text_input('Enter your question here:', '')
else:
UpperCAmelCase__ : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = make_support(question, source=wiki_source, method='dense', n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ : Dict = make_support(question, source=wiki_source, method='sparse', n_results=10)
UpperCAmelCase__ : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : Optional[int] = support_list[:10]
UpperCAmelCase__ : Union[str, Any] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCAmelCase__ : Optional[int] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCAmelCase__ : Optional[Any] = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : List[Any] = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCAmelCase__ : List[Any] = sec_titles.split(' & ')
UpperCAmelCase__ : Union[str, Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : str = find_nearest_training(question)
UpperCAmelCase__ : Union[str, Any] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
UpperCAmelCase__ : List[str] = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
UpperCAmelCase__ : List[str] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 416
| 1
|
def add(first: int, second: int) -> int:
    # Add two integers using only bitwise operations: AND gives the carry bits,
    # XOR gives the partial sum without carries.
    # Assumes non-negative integers (Python ints are unbounded, so negative inputs can loop forever).
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
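# Hedged trace of the loop above for add(5, 3):
#   carry=1, first=6, second=2
#   carry=2, first=4, second=4
#   carry=4, first=0, second=8
#   carry=0, first=8, second=0  -> returns 8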
if __name__ == "__main__":
import doctest
doctest.testmod()
first = int(input('Enter the first number: ').strip())
second = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
| 64
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
a_ : List[str] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
a_ : Dict = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
a_ : int = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
a_ : Optional[int] = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6_0_0_0,
'return_attention_mask': False,
'do_normalize': True,
}
a_ : List[str] = tempfile.mkdtemp()
a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a_ : List[str] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
# load decoder from hub
a_ : int = 'hf-internal-testing/ngram-beam-search-decoder'
def SCREAMING_SNAKE_CASE ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
a_ : List[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(SCREAMING_SNAKE_CASE__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Any:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
a_ : Optional[int] = self.get_tokenizer()
a_ : Optional[Any] = self.get_feature_extractor()
a_ : Any = self.get_decoder()
a_ : List[str] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
a_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
a_ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
a_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
a_ : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
a_ : Any = self.get_feature_extractor()
a_ : Tuple = self.get_tokenizer()
a_ : Any = self.get_decoder()
a_ : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = floats_list((3, 1_0_0_0) )
a_ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
a_ : Optional[int] = processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
a_ : List[Any] = self.get_feature_extractor()
a_ : Dict = self.get_tokenizer()
a_ : Optional[Any] = self.get_decoder()
a_ : Tuple = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : Dict = 'This is a test string'
a_ : Tuple = processor(text=SCREAMING_SNAKE_CASE__ )
a_ : str = tokenizer(SCREAMING_SNAKE_CASE__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits( self , shape=(2, 1_0, 1_6) , seed=7_7 ) -> Optional[int]:
    np.random.seed(seed )
    return np.random.rand(*shape )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Union[str, Any] = self.get_feature_extractor()
a_ : Tuple = self.get_tokenizer()
a_ : Optional[Any] = self.get_decoder()
a_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
a_ : Union[str, Any] = processor.decode(SCREAMING_SNAKE_CASE__ )
a_ : int = decoder.decode_beams(SCREAMING_SNAKE_CASE__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
a_ : Any = self.get_feature_extractor()
a_ : Union[str, Any] = self.get_tokenizer()
a_ : str = self.get_decoder()
a_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : str = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
a_ : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
else:
with get_context(SCREAMING_SNAKE_CASE__ ).Pool() as pool:
a_ : Dict = processor.batch_decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[str] = list(SCREAMING_SNAKE_CASE__ )
with get_context('fork' ).Pool() as p:
a_ : List[Any] = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ , a_ , a_ : List[str] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.logit_score )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.lm_score )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
a_ : List[Any] = self.get_feature_extractor()
a_ : Optional[int] = self.get_tokenizer()
a_ : Optional[Any] = self.get_decoder()
a_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : str = self._get_dummy_logits()
a_ : List[str] = 1_5
a_ : Tuple = -20.0
a_ : Dict = -4.0
a_ : Optional[int] = processor.batch_decode(
SCREAMING_SNAKE_CASE__ , beam_width=SCREAMING_SNAKE_CASE__ , beam_prune_logp=SCREAMING_SNAKE_CASE__ , token_min_logp=SCREAMING_SNAKE_CASE__ , )
a_ : Union[str, Any] = decoded_processor_out.text
a_ : Any = list(SCREAMING_SNAKE_CASE__ )
with get_context('fork' ).Pool() as pool:
a_ : List[str] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , beam_width=SCREAMING_SNAKE_CASE__ , beam_prune_logp=SCREAMING_SNAKE_CASE__ , token_min_logp=SCREAMING_SNAKE_CASE__ , )
a_ : Any = [d[0][0] for d in decoded_decoder_out]
a_ : List[Any] = [d[0][2] for d in decoded_decoder_out]
a_ : int = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
a_ : Any = self.get_feature_extractor()
a_ : List[Any] = self.get_tokenizer()
a_ : str = self.get_decoder()
a_ : int = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self._get_dummy_logits()
a_ : int = 2.0
a_ : Tuple = 5.0
a_ : List[str] = -20.0
a_ : Optional[Any] = True
a_ : int = processor.batch_decode(
SCREAMING_SNAKE_CASE__ , alpha=SCREAMING_SNAKE_CASE__ , beta=SCREAMING_SNAKE_CASE__ , unk_score_offset=SCREAMING_SNAKE_CASE__ , lm_score_boundary=SCREAMING_SNAKE_CASE__ , )
a_ : str = decoded_processor_out.text
a_ : Dict = list(SCREAMING_SNAKE_CASE__ )
decoder.reset_params(
alpha=SCREAMING_SNAKE_CASE__ , beta=SCREAMING_SNAKE_CASE__ , unk_score_offset=SCREAMING_SNAKE_CASE__ , lm_score_boundary=SCREAMING_SNAKE_CASE__ , )
with get_context('fork' ).Pool() as pool:
a_ : Optional[int] = decoder.decode_beams_batch(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
a_ : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
a_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
a_ : int = processor.decoder.model_container[processor.decoder._model_key]
a_ : Optional[Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
a_ : Dict = os.listdir(SCREAMING_SNAKE_CASE__ )
a_ : Dict = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
a_ : Dict = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
a_ : List[Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
a_ : List[str] = os.listdir(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = os.listdir(SCREAMING_SNAKE_CASE__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
a_ : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
a_ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
a_ : int = floats_list((3, 1_0_0_0) )
a_ : Tuple = processor_wavaveca(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
a_ : List[str] = processor_auto(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
a_ : Dict = self._get_dummy_logits()
a_ : Dict = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = processor_auto.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
a_ : int = self.get_feature_extractor()
a_ : Dict = self.get_tokenizer()
a_ : Tuple = self.get_decoder()
a_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def get_from_offsets( offsets , key ) -> Optional[int]:
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
a_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
a_ : str = self._get_dummy_logits()[0]
a_ : str = processor.decode(SCREAMING_SNAKE_CASE__ , output_word_offsets=SCREAMING_SNAKE_CASE__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
a_ : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
a_ : Dict = self._get_dummy_logits()
a_ : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE__ , output_word_offsets=SCREAMING_SNAKE_CASE__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
import torch
a_ : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE__ )
a_ : Dict = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
a_ : str = iter(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = next(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
a_ : Optional[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a_ : Optional[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
a_ : Any = model(SCREAMING_SNAKE_CASE__ ).logits.cpu().numpy()
a_ : List[Any] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a_ : int = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
a_ : Union[str, Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) , output.text )
# output times
a_ : Optional[Any] = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'start_time' ) )
a_ : Any = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'end_time' ) )
# fmt: off
a_ : Optional[int] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
a_ : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=0.01 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=0.01 ) )
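def _word_offsets_to_timestamps_sketch():
    # Hedged usage sketch, not part of the original test class above: it mirrors the
    # slow integration test and shows how `output_word_offsets=True` can be turned
    # into word-level timestamps. The checkpoint name is taken from the test above;
    # the silent dummy input and the rest are illustrative assumptions only.
    import numpy as np
    import torch
    from transformers import AutoProcessor, Wav2Vec2ForCTC

    processor = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
    model = Wav2Vec2ForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
    speech = np.zeros(16_000 , dtype=np.float32 )  # one second of silence as a stand-in
    input_values = processor(speech , sampling_rate=16_000 , return_tensors='pt' ).input_values
    with torch.no_grad():
        logits = model(input_values ).logits.cpu().numpy()
    output = processor.decode(logits[0] , output_word_offsets=True )
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    return [
        {'word': d['word'], 'start_time': d['start_offset'] * time_offset, 'end_time': d['end_offset'] * time_offset}
        for d in output['word_offsets']
    ]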
| 570
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class a ( __UpperCAmelCase ):
_snake_case = '''mgp-str'''
def __init__( self , image_size=[32, 1_28] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=5_02_57 , num_wordpiece_labels=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
    super().__init__(**kwargs )
    # store the model hyper-parameters on the config instance
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.max_token_length = max_token_length
    self.num_character_labels = num_character_labels
    self.num_bpe_labels = num_bpe_labels
    self.num_wordpiece_labels = num_wordpiece_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.mlp_ratio = mlp_ratio
    self.distilled = distilled
    self.layer_norm_eps = layer_norm_eps
    self.drop_rate = drop_rate
    self.qkv_bias = qkv_bias
    self.attn_drop_rate = attn_drop_rate
    self.drop_path_rate = drop_path_rate
    self.output_aa_attentions = output_aa_attentions
    self.initializer_range = initializer_range
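# Illustrative usage sketch (not from the original file): instantiate the config class
# defined above (named `MgpstrConfig` upstream) and read back a couple of fields.
#   config = a(max_token_length=27, hidden_size=7_68)
#   print(config.num_character_labels)  # 38 by default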
| 712
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 555
| 0
|
import requests
def send_slack_message( message_body , slack_url ) -> None:
    """Post a plain-text message to a Slack incoming-webhook URL."""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
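# Example invocation (illustrative; the script name and both paths are placeholders):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/old_prophetnet_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_folder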
| 82
| 0
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available( ) -> bool:
'''simple docstring'''
# Get the sagemaker specific mp parameters from smp_options variable.
__snake_case = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__snake_case = json.loads(snake_case__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__snake_case = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__snake_case = json.loads(snake_case__ )
if not mpi_options.get('sagemaker_mpi_enabled' , snake_case__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
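# Illustrative example (assumed typical values, not taken from the source): with
# SM_HP_MP_PARAMETERS='{"partitions": 2}' and
# SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}' exported, and the
# `smdistributed` package importable, the helper above is meant to return True;
# if any of the three checks fails it returns False.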
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def _a ( self) -> List[Any]:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , FutureWarning , )
@cached_property
def _a ( self) -> "torch.device":
logger.info('PyTorch: setting up devices')
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
if self.no_cuda:
__snake_case = torch.device('cpu')
__snake_case = 0
elif is_sagemaker_model_parallel_available():
__snake_case = smp.local_rank()
__snake_case = torch.device('cuda' , lowercase_)
__snake_case = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta)
__snake_case = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
__snake_case = torch.device('cuda' , self.local_rank)
__snake_case = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__snake_case = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__snake_case = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta)
__snake_case = torch.device('cuda' , self.local_rank)
__snake_case = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase_)
return device
@property
def _a ( self) -> Optional[int]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _a ( self) -> Optional[Any]:
return not is_sagemaker_model_parallel_available()
@property
def _a ( self) -> Any:
return False
| 676
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676
| 1
|
"""simple docstring"""
import torch
def main( ):
"""simple docstring"""
if torch.cuda.is_available():
_lowerCamelCase : Tuple = torch.cuda.device_count()
else:
_lowerCamelCase : str = 0
print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 88
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCamelCase : str = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase_() -> str:
UpperCAmelCase = "https://pypi.org/pypi/diffusers/json"
UpperCAmelCase = json.loads(request.urlopen(lowerCamelCase_ ).read() )["releases"].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def lowerCamelCase_() -> Optional[int]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCAmelCase = Path(lowerCamelCase_ ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def lowerCamelCase_(lowerCamelCase_ ) -> Dict:
init_hf_modules()
UpperCAmelCase = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCAmelCase = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def lowerCamelCase_(lowerCamelCase_ ) -> Optional[Any]:
with open(lowerCamelCase_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase = f.read()
# Imports of the form `import .xxx`
UpperCAmelCase = re.findall("^\s*import\s+\.(\S+)\s*$" , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
def lowerCamelCase_(lowerCamelCase_ ) -> Union[str, Any]:
UpperCAmelCase = False
UpperCAmelCase = [module_file]
UpperCAmelCase = []
# Let's recurse through all relative imports
while not no_change:
UpperCAmelCase = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
UpperCAmelCase = Path(lowerCamelCase_ ).parent
UpperCAmelCase = [str(module_path / m ) for m in new_imports]
UpperCAmelCase = [f for f in new_import_files if f not in all_relative_imports]
UpperCAmelCase = [F'{f}.py' for f in new_import_files]
UpperCAmelCase = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
def lowerCamelCase_(lowerCamelCase_ ) -> List[str]:
with open(lowerCamelCase_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase = f.read()
# Imports of the form `import xxx`
UpperCAmelCase = re.findall("^\s*import\s+(\S+)\s*$" , lowerCamelCase_ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
UpperCAmelCase = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
UpperCAmelCase = list(set(lowerCamelCase_ ) )
UpperCAmelCase = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
UpperCAmelCase = module_path.replace(os.path.sep , "." )
UpperCAmelCase = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_(lowerCamelCase_ ) -> Optional[int]:
from ..pipelines import DiffusionPipeline
UpperCAmelCase = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
UpperCAmelCase = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
UpperCAmelCase = cls
return pipeline_class
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Union[str, Any]:
UpperCAmelCase = str(lowerCamelCase_ )
UpperCAmelCase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
UpperCAmelCase = module_file_or_url
UpperCAmelCase = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
UpperCAmelCase = get_diffusers_versions()
# cut ".dev0"
UpperCAmelCase = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
UpperCAmelCase = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
UpperCAmelCase = F'v{revision}'
elif revision == "main":
UpperCAmelCase = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
UpperCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
UpperCAmelCase = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
UpperCAmelCase = "git"
UpperCAmelCase = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
UpperCAmelCase = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
UpperCAmelCase = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
UpperCAmelCase = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
UpperCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
UpperCAmelCase = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
UpperCAmelCase = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase = use_auth_token
elif use_auth_token is True:
UpperCAmelCase = HfFolder.get_token()
else:
UpperCAmelCase = None
UpperCAmelCase = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCAmelCase = submodule_path / commit_hash
UpperCAmelCase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , **lowerCamelCase_ , ) -> Tuple:
UpperCAmelCase = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace(".py" , "" ) )
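# Hedged usage sketch (not part of the original module): upstream this last helper is
# exposed as `get_class_from_dynamic_module`, and a typical call loads a community
# pipeline class from the diffusers examples/community folder, e.g.
#   pipeline_cls = get_class_from_dynamic_module("lpw_stable_diffusion", "lpw_stable_diffusion.py")
# The pipeline name here is illustrative only; any community pipeline file would do.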
| 323
| 0
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , id_ ) -> None:
    """Create a vertex identified by ``id_`` with no key/parent and no edges yet."""
    self.id = str(id_ )
    self.key = None
    self.pi = None
    self.neighbors = []
    self.edges = {}  # {vertex id: distance}
def __lt__( self , other ) -> bool:
    """Order vertices by their current key (used by ``min`` and the heap)."""
    return self.key < other.key
def __repr__( self ) -> str:
    """Represent the vertex by its id."""
    return self.id
def add_neighbor( self , vertex ) -> None:
    """Register ``vertex`` as adjacent to this vertex."""
    self.neighbors.append(vertex )
def add_edge( self , vertex , weight ) -> None:
    """Store the weight of the edge from this vertex to ``vertex``."""
    self.edges[vertex.id] = weight
def connect(graph , a , b , edge ):
    '''Add an undirected, weighted edge between vertices ``a`` and ``b`` (1-indexed).'''
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph , root ):
    '''Prim's algorithm with a plain list; returns the MST as (child, parent) id pairs.'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph , root ):
    '''Prim's algorithm with a binary heap; yields MST edges as (child, parent) id pairs.'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
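# Illustrative usage (not part of the original file; the vertex class above is called
# `Vertex` upstream but `__lowerCAmelCase` here). The edge list and the hand-worked
# MST below are an example, reported as (child, parent) vertex-id pairs:
#   graph = [__lowerCAmelCase(n) for n in range(5)]
#   connect(graph, 1, 2, 15)
#   connect(graph, 1, 3, 12)
#   connect(graph, 2, 4, 13)
#   connect(graph, 3, 4, 11)
#   connect(graph, 1, 5, 20)
#   sorted(prim(graph, graph[0]))             # [(2, 4), (3, 1), (4, 3), (5, 1)]
#   sorted(list(prim_heap(graph, graph[0])))  # same edges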
def A_ ():
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
"""simple docstring"""
def simplify(__a ):
'''simple docstring'''
A_ = current_set.copy()
for row_index, row in enumerate(__a ):
A_ = row[0]
for column_index, column in enumerate(__a ):
if magnitude == 0:
A_ = column
continue
A_ = column / magnitude
# Subtract to cancel term
A_ = current_set[0]
A_ = [first_row]
A_ = current_set[1::]
for row in current_set:
A_ = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__a )
continue
for column_index in range(len(__a ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__a )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
A_ = final_set[0]
A_ = []
A_ = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
A_ = simplify(__a )
for i in range(len(__a ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __a )
A_ = resultant
return final_set
def solve_simultaneous(__a ):
'''simple docstring'''
if len(__a ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
A_ = len(__a ) + 1
if any(len(__a ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(__a , (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(__a ) == 1:
return [equations[0][-1] / equations[0][0]]
A_ = equations.copy()
if any(0 in row for row in data_set ):
A_ = data_set.copy()
A_ = []
for row_index, row in enumerate(__a ):
if 0 not in row:
A_ = data_set.pop(__a )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 , __a )
A_ = data_set.copy()
A_ = simplify(__a )
A_ = simplified[::-1]
A_ = []
for row in simplified:
A_ = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
A_ = row.copy()[: len(__a ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__a ) == 0:
solutions.append(0 )
continue
A_ = temp_row[1::]
A_ = temp_row[::-1]
for column_index, column in enumerate(__a ):
current_solution -= column * solutions[column_index]
solutions.append(__a )
A_ = []
for item in solutions:
final.append(float(round(__a , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
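# Hand-worked check (not part of the original file): adding the five equations gives
# 6*(x1 + ... + x5) = 30, so the unknowns sum to 5 and each xi equals its right-hand
# side minus 5, i.e. (x1, ..., x5) = (-1, 0, 1, 2, 3); the single-equation call solves
# 4*x = 2, i.e. x = 0.5.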
| 482
| 0
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
lowercase__ = ""
lowercase__ = 0
lowercase__ = 0
while div != 1:
lowercase__ , lowercase__ = divmod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if base >= 11 and 9 < mod < 36:
lowercase__ = ALPHABET_VALUES[str(SCREAMING_SNAKE_CASE_ )]
else:
lowercase__ = str(SCREAMING_SNAKE_CASE_ )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(SCREAMING_SNAKE_CASE_ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 413
|
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 5000_0000 ):
lowercase__ = set()
lowercase__ = int((limit - 24) ** (1 / 2) )
lowercase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE_ ) ) )
for primea in primes:
lowercase__ = primea * primea
for primea in primes:
lowercase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
lowercase__ = primea * primea * primea * primea
lowercase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(F'{solution() = }')
| 413
| 1
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A ( unittest.TestCase ):
def test_swish( self : Optional[Any] ) -> str:
    """simple docstring"""
    _a = get_activation('''swish''' )
    self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def test_silu( self : Tuple ) -> str:
    """simple docstring"""
    _a = get_activation('''silu''' )
    self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def test_mish( self : Tuple ) -> Union[str, Any]:
    """simple docstring"""
    _a = get_activation('''mish''' )
    self.assertIsInstance(_a , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def test_gelu( self : List[Any] ) -> int:
    """simple docstring"""
    _a = get_activation('''gelu''' )
    self.assertIsInstance(_a , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
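# Illustrative usage (not part of the test file above): `get_activation` maps a string
# name to a torch.nn activation module, e.g.
#   act = get_activation('''gelu''')
#   y = act(torch.randn(4))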
| 716
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case : List[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Any = ['BeitFeatureExtractor']
_snake_case : Any = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_snake_case : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 377
| 0
|
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase_ = 'path-to-your-trained-model'
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
lowerCAmelCase_ = 'A photo of sks dog in a bucket'
lowerCAmelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 411
|
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->str:
"""simple docstring"""
__lowercase : Dict = []
__lowercase : Optional[int] = []
__lowercase : Any = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
__lowercase : List[Any] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ), "Stack".center(_lowerCamelCase ), "Postfix".center(_lowerCamelCase ), sep=" | ", )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ), ("".join(_lowerCamelCase )).ljust(_lowerCamelCase ), ("".join(_lowerCamelCase )).ljust(_lowerCamelCase ), sep=" | ", ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ), ("".join(_lowerCamelCase )).ljust(_lowerCamelCase ), ("".join(_lowerCamelCase )).ljust(_lowerCamelCase ), sep=" | ", ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def snake_case__ ( _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
__lowercase : List[Any] = ")" # change "(" to ")"
elif infix[i] == ")":
__lowercase : List[Any] = "(" # change ")" to "("
return (infix_2_postfix("".join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
__A : Optional[Any] = input('\nEnter an Infix Equation = ') # Input an Infix equation
__A : List[Any] = ''.join(Infix.split()) # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 575
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
A__ : Dict = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 244
|
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` is present in the sorted list `a_list`, using recursive halving."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
print(f'{target} was {not_str}found in {sequence}')
| 244
| 1
|
import argparse
import os
import re
import packaging.version
a__ = """examples/"""
a__ = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
a__ = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
a__ = """README.md"""
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replacement pattern registered for that file type."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 654
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
def __getitem__( self : Dict , UpperCamelCase__ : Any):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Union[str, Any]):
'''simple docstring'''
return len(self.lengths)
    def check(self):
'''simple docstring'''
assert len(self.token_ids) == len(self.lengths)
assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
'''simple docstring'''
snake_case__ = self.params.max_model_input_size
snake_case__ = self.lengths > max_len
logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''')
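        # Split each overlong sequence into chunks of at most `max_len` tokens, re-adding the special tokens at the chunk boundaries.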
def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple):
return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)]
snake_case__ = []
snake_case__ = []
if self.params.mlm:
snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_)
new_lengths.append(len_)
else:
snake_case__ = []
for sub_s in divide_chunks(seq_ , max_len - 2):
if sub_s[0] != cls_id:
snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__)
if sub_s[-1] != sep_id:
snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__)
assert len(UpperCamelCase__) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCamelCase__)
new_tok_ids.extend(UpperCamelCase__)
new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs])
snake_case__ = np.array(UpperCamelCase__)
snake_case__ = np.array(UpperCamelCase__)
    def remove_empty_sequences(self):
'''simple docstring'''
snake_case__ = len(self)
snake_case__ = self.lengths > 1_1
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')
    def remove_unknown_sequences(self):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = len(self)
snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
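            # Keep only the sequences in which less than 50% of the tokens are unknown.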
snake_case__ = (unk_occs / self.lengths) < 0.5
snake_case__ = self.token_ids[indices]
snake_case__ = self.lengths[indices]
snake_case__ = len(self)
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')
    def print_statistics(self):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'''{len(self)} sequences''')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
'''simple docstring'''
snake_case__ = [t[0] for t in batch]
snake_case__ = [t[1] for t in batch]
assert len(UpperCamelCase__) == len(UpperCamelCase__)
# Max for paddings
snake_case__ = max(UpperCamelCase__)
# Pad token ids
if self.params.mlm:
snake_case__ = self.params.special_tok_ids["""pad_token"""]
else:
snake_case__ = self.params.special_tok_ids["""unk_token"""]
snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids]
assert len(tk_) == len(UpperCamelCase__)
assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_)
snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_)
snake_case__ = torch.tensor(UpperCamelCase__) # (bs)
return tk_t, lg_t
| 654
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def __lowerCamelCase ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ :List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ :Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ :Dict = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self : Optional[int] ) -> List[str]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCamelCase ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ :Any = TFResNetModel(config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCamelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCamelCase ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ :str = self.num_labels
SCREAMING_SNAKE_CASE__ :List[Any] = TFResNetForImageClassification(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Any = config_and_inputs
SCREAMING_SNAKE_CASE__ :Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def __lowerCamelCase ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ :List[Any] = TFResNetModelTester(self )
SCREAMING_SNAKE_CASE__ :List[str] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def __lowerCamelCase ( self : List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self : List[Any] ) -> List[str]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self : List[Any] ) -> List[str]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
pass
def __lowerCamelCase ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :str = model_class(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ :Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ :Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __lowerCamelCase ( self : str ) -> Any:
def check_hidden_states_output(UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ):
SCREAMING_SNAKE_CASE__ :List[Any] = model_class(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Dict = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ :List[Any] = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ :Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ :Optional[int] = layer_type
SCREAMING_SNAKE_CASE__ :List[str] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ :Tuple = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __lowerCamelCase ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def __lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ :Tuple = TFResNetModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def prepare_img():
    """Load the COCO test image used by the integration tests."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[int] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
SCREAMING_SNAKE_CASE__ :List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE__ :str = prepare_img()
SCREAMING_SNAKE_CASE__ :Dict = image_processor(images=UpperCamelCase_ , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE__ :List[str] = model(**UpperCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE__ :int = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase_ , atol=1e-4 ) )
| 320
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results: clear any stale config/weights in the target directory, then save the pruned model
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution (optionally squaring unnormalized scores first)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data))
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : str=False ) -> Optional[int]:
'''simple docstring'''
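    # Accumulate, over the whole dataset, the per-head attention entropy and the head-mask gradient magnitudes (importance).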
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Tuple = model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE__ :List[str] = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.zeros(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE__ :Any = torch.ones(UpperCAmelCase__ , UpperCAmelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE__ :Optional[Any] = None
SCREAMING_SNAKE_CASE__ :Dict = 0.0
SCREAMING_SNAKE_CASE__ :Any = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE__ :Union[str, Any] = tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE__) , ) :Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Union[str, Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ :Optional[int] = entropy(attn.detach() , UpperCAmelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
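            # The absolute gradient of the head mask is a first-order estimate of each head's importance.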
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE__ :List[str] = 2
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.pow(torch.pow(UpperCAmelCase__ , UpperCAmelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE__ :Dict = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(UpperCAmelCase__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(UpperCAmelCase__ )
logger.info('Head ranked by importance scores' )
SCREAMING_SNAKE_CASE__ :str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
SCREAMING_SNAKE_CASE__ :Any = torch.arange(
head_importance.numel() , device=args.device )
SCREAMING_SNAKE_CASE__ :Optional[Any] = head_ranks.view_as(UpperCAmelCase__ )
print_ad_tensor(UpperCAmelCase__ )
return attn_entropy, head_importance, total_loss
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , UpperCAmelCase__ , original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.ones_like(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE__ :str = original_score
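    # Iteratively mask the least important heads until the score drops below masking_threshold * original score.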
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE__ :Any = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE__ :str = float('Inf' )
SCREAMING_SNAKE_CASE__ :str = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
SCREAMING_SNAKE_CASE__ :Optional[int] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE__ :List[Any] = new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE__ :str = 0.0
SCREAMING_SNAKE_CASE__ :Any = new_head_mask.view_as(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :str = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase__ )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :int = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Any = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCAmelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , )
logger.info('Final head mask' )
print_ad_tensor(UpperCAmelCase__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Any = 1 / loss
SCREAMING_SNAKE_CASE__ :List[Any] = datetime.now() - before_time
SCREAMING_SNAKE_CASE__ :Union[str, Any] = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ :Dict = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ :Any = [
v,
]
assert sum(len(UpperCAmelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE__ :Optional[int] = datetime.now()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[Any] = compute_heads_importance(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , compute_entropy=UpperCAmelCase__ , compute_importance=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , actually_pruned=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ :List[Any] = 1 / loss
SCREAMING_SNAKE_CASE__ :Optional[int] = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCAmelCase__ , UpperCAmelCase__ , pruned_num_params / original_num_params * 1_0_0 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCAmelCase__ , UpperCAmelCase__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 )
save_model(UpperCAmelCase__ , args.output_dir )
def lowerCamelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=UpperCAmelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=UpperCAmelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=UpperCAmelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=UpperCAmelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=UpperCAmelCase__ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=UpperCAmelCase__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_2_8 , type=UpperCAmelCase__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=UpperCAmelCase__ , help='Batch size.' )
parser.add_argument('--seed' , type=UpperCAmelCase__ , default=4_2 )
parser.add_argument('--local_rank' , type=UpperCAmelCase__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=UpperCAmelCase__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase__ , default='' , help='Can be used for distant debugging.' )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
SCREAMING_SNAKE_CASE__ :Tuple = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
SCREAMING_SNAKE_CASE__ :Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.device('cuda' , args.local_rank )
SCREAMING_SNAKE_CASE__ :List[Any] = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
SCREAMING_SNAKE_CASE__ :Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
SCREAMING_SNAKE_CASE__ :Tuple = nn.parallel.DistributedDataParallel(
UpperCAmelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase__ )
elif args.n_gpu > 1:
SCREAMING_SNAKE_CASE__ :Optional[int] = nn.DataParallel(UpperCAmelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , UpperCAmelCase__ )
# Prepare dataset
SCREAMING_SNAKE_CASE__ :Optional[Any] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = (torch.from_numpy(UpperCAmelCase__ ),)
SCREAMING_SNAKE_CASE__ :List[str] = TensorDataset(*UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = RandomSampler(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :int = DataLoader(UpperCAmelCase__ , sampler=UpperCAmelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
SCREAMING_SNAKE_CASE__ :Tuple = mask_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
prune_heads(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 320
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively build every subsequence: at each index, either skip the element or take it."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 597
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["BeitFeatureExtractor"]
SCREAMING_SNAKE_CASE_ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 597
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 718
|
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of `nums` around its mean.

    >>> average_absolute_deviation([0])
    0.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase__(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class lowerCamelCase__(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 551
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 551
| 1
|
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBird question-answering module with an extra 5-way category classification head on top, so its weights can
    still be loaded with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels and take the negative log-likelihood under a log-softmax of the logits
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('''f4''')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase__:
__magic_name__ : str = "google/bigbird-roberta-base"
__magic_name__ : int = 3000
__magic_name__ : int = 1_0500
__magic_name__ : int = 128
__magic_name__ : int = 3
__magic_name__ : int = 1
__magic_name__ : int = 5
# tx_args
__magic_name__ : float = 3e-5
__magic_name__ : float = 0.0
__magic_name__ : int = 2_0000
__magic_name__ : float = 0.0_0_9_5
__magic_name__ : str = "bigbird-roberta-natural-questions"
__magic_name__ : str = "training-expt"
__magic_name__ : str = "data/nq-training.jsonl"
__magic_name__ : str = "data/nq-validation.jsonl"
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=lowerCAmelCase )
UpperCAmelCase = os.path.join(self.base_dir , self.save_dir )
UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase__:
__magic_name__ : int
__magic_name__ : int = 4096 # no dynamic padding on TPUs
def __call__( self : List[str] , lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.collate_fn(lowerCAmelCase )
UpperCAmelCase = jax.tree_util.tree_map(lowerCAmelCase , lowerCAmelCase )
return batch
def a__( self : List[str] , lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.fetch_inputs(features['''input_ids'''] )
UpperCAmelCase = {
'''input_ids''': jnp.array(lowerCAmelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(lowerCAmelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def a__( self : Tuple , lowerCAmelCase : list )-> int:
"""simple docstring"""
UpperCAmelCase = [self._fetch_inputs(lowerCAmelCase ) for ids in input_ids]
return zip(*lowerCAmelCase )
def a__( self : Dict , lowerCAmelCase : list )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [1 for _ in range(len(lowerCAmelCase ) )]
while len(lowerCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
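# The train/eval steps below are pmap-ed across devices; losses, gradients and metrics are averaged over the "batch" axis with jax.lax.pmean.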
@partial(jax.pmap , axis_name='''batch''' )
def lowerCamelCase__ ( A : List[str] , A : Optional[int] , **A : int ):
'''simple docstring'''
def loss_fn(A : Dict ):
UpperCAmelCase = model_inputs.pop('''start_labels''' )
UpperCAmelCase = model_inputs.pop('''end_labels''' )
UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
UpperCAmelCase = state.apply_fn(**A , params=A , dropout_rng=A , train=A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs
return state.loss_fn(
A , A , A , A , A , A , )
UpperCAmelCase , UpperCAmelCase = jax.random.split(A )
UpperCAmelCase = jax.value_and_grad(A )
UpperCAmelCase , UpperCAmelCase = grad_fn(state.params )
UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
UpperCAmelCase = jax.lax.pmean(A , '''batch''' )
UpperCAmelCase = state.apply_gradients(grads=A )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def lowerCamelCase__ ( A : List[str] , **A : List[str] ):
'''simple docstring'''
UpperCAmelCase = model_inputs.pop('''start_labels''' )
UpperCAmelCase = model_inputs.pop('''end_labels''' )
UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
UpperCAmelCase = state.apply_fn(**A , params=state.params , train=A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs
UpperCAmelCase = state.loss_fn(A , A , A , A , A , A )
UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class UpperCamelCase__( train_state.TrainState ):
__magic_name__ : Callable = struct.field(pytree_node=lowerCAmelCase )
@dataclass
class UpperCamelCase__:
__magic_name__ : Args
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : wandb
__magic_name__ : Callable = None
def a__( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=None )-> Tuple:
"""simple docstring"""
UpperCAmelCase = model.params
UpperCAmelCase = TrainState.create(
apply_fn=model.__call__ , params=lowerCAmelCase , tx=lowerCAmelCase , loss_fn=lowerCAmelCase , )
if ckpt_dir is not None:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = restore_checkpoint(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
UpperCAmelCase , UpperCAmelCase = build_tx(**lowerCAmelCase )
UpperCAmelCase = train_state.TrainState(
step=lowerCAmelCase , apply_fn=model.__call__ , params=lowerCAmelCase , tx=lowerCAmelCase , opt_state=lowerCAmelCase , )
UpperCAmelCase = args
UpperCAmelCase = data_collator
UpperCAmelCase = lr
UpperCAmelCase = params
UpperCAmelCase = jax_utils.replicate(lowerCAmelCase )
return state
def a__( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.args
UpperCAmelCase = len(lowerCAmelCase ) // args.batch_size
UpperCAmelCase = jax.random.PRNGKey(0 )
UpperCAmelCase = jax.random.split(lowerCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0, dtype=jnp.float32)
UpperCAmelCase = get_batched_dataset(lowerCAmelCase , args.batch_size , seed=lowerCAmelCase )
UpperCAmelCase = 0
for batch in tqdm(lowerCAmelCase , total=lowerCAmelCase , desc=F"""Running EPOCH-{epoch}""" ):
UpperCAmelCase = self.data_collator(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.train_step_fn(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
UpperCAmelCase = jax_utils.unreplicate(state.step )
UpperCAmelCase = running_loss.item() / i
UpperCAmelCase = self.scheduler_fn(state_step - 1 )
UpperCAmelCase = self.evaluate(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowerCAmelCase ) )
self.logger.log(lowerCAmelCase , commit=lowerCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=lowerCAmelCase )
def a__( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = get_batched_dataset(lowerCAmelCase , self.args.batch_size )
UpperCAmelCase = len(lowerCAmelCase ) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
UpperCAmelCase = 0
for batch in tqdm(lowerCAmelCase , total=lowerCAmelCase , desc='''Evaluating ... ''' ):
UpperCAmelCase = self.data_collator(lowerCAmelCase )
UpperCAmelCase = self.val_step_fn(lowerCAmelCase , **lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def a__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = jax_utils.unreplicate(lowerCAmelCase )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(lowerCAmelCase , params=state.params )
with open(os.path.join(lowerCAmelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCAmelCase , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(lowerCAmelCase , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCAmelCase , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , lowerCAmelCase )
print('''DONE''' )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
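# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# A quick, self-contained check of the warmup + linear-decay schedule assembled by
# `scheduler_fn` / `build_tx` above. The numbers are illustrative, not the original
# training hyper-parameters.
if __name__ == "__main__":
    demo_lr = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000)
    print(demo_lr(0), demo_lr(100), demo_lr(999))  # start of warmup, peak, end of decay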
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of the graph via a maximal matching (2-approximation)."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edges of the graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 578
|
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
UpperCamelCase__ : List[str] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ : Any = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
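    # --- Hedged addition (illustrative self-check that needs no user input) ---
    assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]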
| 578
| 1
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
SCREAMING_SNAKE_CASE = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : List[Any] = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[int] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Any = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
SCREAMING_SNAKE_CASE = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
SCREAMING_SNAKE_CASE = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(_lowerCAmelCase )
class __a :
def __call__( self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Union[bool, str] = False , UpperCAmelCase_ : Union[bool, str] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Tuple , )-> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
elif titles is None or texts is None:
UpperCamelCase = titles if texts is None else texts
return super().__call__(
UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
UpperCamelCase = titles if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [titles]
UpperCamelCase = texts if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [texts]
UpperCamelCase = len(UpperCAmelCase_ )
UpperCamelCase = questions if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else [questions] * n_passages
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
f"There should be as many titles than texts but got {len(UpperCAmelCase_ )} titles and {len(UpperCAmelCase_ )} texts." )
UpperCamelCase = super().__call__(UpperCAmelCase_ , UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
UpperCamelCase = super().__call__(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )["input_ids"]
UpperCamelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
}
if return_attention_mask is not False:
UpperCamelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCamelCase = attention_mask
return self.pad(UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : BatchEncoding , UpperCAmelCase_ : DPRReaderOutput , UpperCAmelCase_ : int = 16 , UpperCAmelCase_ : int = 64 , UpperCAmelCase_ : int = 4 , )-> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCamelCase = reader_input["input_ids"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = reader_output[:3]
UpperCamelCase = len(UpperCAmelCase_ )
UpperCamelCase = sorted(range(UpperCAmelCase_ ) , reverse=UpperCAmelCase_ , key=relevance_logits.__getitem__ )
UpperCamelCase = []
for doc_id in sorted_docs:
UpperCamelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCamelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCamelCase = sequence_ids.index(self.pad_token_id )
else:
UpperCamelCase = len(UpperCAmelCase_ )
UpperCamelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase_ , top_spans=UpperCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase_ , start_index=UpperCAmelCase_ , end_index=UpperCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCAmelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , )-> List[DPRSpanPrediction]:
"""simple docstring"""
UpperCamelCase = []
for start_index, start_score in enumerate(UpperCAmelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCamelCase = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[1] , reverse=UpperCAmelCase_ )
UpperCamelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
UpperCamelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCAmelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_lowerCAmelCase )
class __a ( _lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase_ : str = VOCAB_FILES_NAMES
UpperCamelCase_ : Tuple = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Any = ['''input_ids''', '''attention_mask''']
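# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The mixin above builds reader inputs of the form [CLS] question [SEP] title [SEP] text,
# and `decode_best_spans` turns reader logits back into answer spans. With the public
# `transformers` classes that correspond to this module (assumed available), usage looks
# roughly like this (downloads pretrained weights from the Hub):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded)
#     best_spans = tokenizer.decode_best_spans(encoded, outputs)
#     print(best_spans[0].text)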
| 556
|
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n below the analysed bound (probabilistic above it)."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        20_47,
        1_37_36_53,
        25_32_60_01,
        32_15_03_17_51,
        2_15_23_02_89_87_47,
        3_47_47_49_66_03_83,
        3_41_55_00_71_72_83_21,
        1,
        3_82_51_23_05_65_46_41_30_51,
        1,
        1,
        31_86_65_85_78_34_03_11_51_16_74_61,
        3_31_70_44_06_46_79_88_73_85_96_19_81,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
"""simple docstring"""
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
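    # --- Hedged addition (illustrative; both values stay inside the deterministic bound) ---
    assert miller_rabin(2**61 - 1)  # the Mersenne prime M61
    assert not miller_rabin(2**61 + 1)  # composite (divisible by 3)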
| 556
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 6
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
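# --- Hedged usage sketch (added for illustration; this module mirrors `diffusers.VQModel`) ---
# Round-tripping a random image through encode -> quantize -> decode, using the call
# pattern defined above; the default constructor arguments keep the model tiny.
#
#     import torch
#     from diffusers import VQModel
#
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         latents = model.encode(x).latents        # continuous latents (pre-quantization)
#         reconstruction = model.decode(latents).sample
#     print(reconstruction.shape)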
| 6
| 1
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale)."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , A_ : Tuple , A_ : List[Any]=7 , A_ : Optional[Any]=4_00 , A_ : Dict=20_00 , A_ : Union[str, Any]=20_48 , A_ : str=1_28 , A_ : Tuple=1 , A_ : Dict=5_12 , A_ : Dict=30 , A_ : List[Any]=4_41_00 , )-> int:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = min_seq_length
__UpperCamelCase = max_seq_length
__UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase = spectrogram_length
__UpperCamelCase = feature_size
__UpperCamelCase = num_audio_channels
__UpperCamelCase = hop_length
__UpperCamelCase = chunk_length
__UpperCamelCase = sampling_rate
def A ( self : str )-> str:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def A ( self : Optional[Any] , A_ : List[Any]=False , A_ : Optional[Any]=False )-> Optional[Any]:
def _flatten(A_ : Union[str, Any] ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : int = TvltFeatureExtractor
def A ( self : Optional[int] )-> Tuple:
__UpperCamelCase = TvltFeatureExtractionTester(self )
def A ( self : Optional[Any] )-> Dict:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , "spectrogram_length" ) )
self.assertTrue(hasattr(A_ , "feature_size" ) )
self.assertTrue(hasattr(A_ , "num_audio_channels" ) )
self.assertTrue(hasattr(A_ , "hop_length" ) )
self.assertTrue(hasattr(A_ , "chunk_length" ) )
self.assertTrue(hasattr(A_ , "sampling_rate" ) )
def A ( self : str )-> Optional[int]:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase = self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase = feat_extract_first.to_dict()
__UpperCamelCase = feat_extract_second.to_dict()
__UpperCamelCase = dict_first.pop("mel_filters" )
__UpperCamelCase = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def A ( self : List[str] )-> int:
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase = os.path.join(A_ , "feat_extract.json" )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase = self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase = feat_extract_first.to_dict()
__UpperCamelCase = feat_extract_second.to_dict()
__UpperCamelCase = dict_first.pop("mel_filters" )
__UpperCamelCase = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def A ( self : Union[str, Any] )-> Optional[Any]:
# Initialize feature_extractor
__UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
__UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
__UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__UpperCamelCase = feature_extractor(
A_ , return_tensors="np" , sampling_rate=4_41_00 , mask_audio=A_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__UpperCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
__UpperCamelCase = np.asarray(A_ )
__UpperCamelCase = feature_extractor(A_ , return_tensors="np" , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[[-0.3032, -0.2708], [-0.4434, -0.4007]]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
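# --- Hedged usage sketch (added for illustration) ---
# Outside of the test harness, the feature extractor turns a raw waveform into
# mel-spectrogram features; the shape below is what the assertions above expect.
#
#     import numpy as np
#     from transformers import TvltFeatureExtractor
#
#     feature_extractor = TvltFeatureExtractor()
#     waveform = np.random.rand(44_100).astype(np.float32)  # ~1 s of audio at 44.1 kHz
#     features = feature_extractor(waveform, sampling_rate=44_100, return_tensors="np")
#     print(features.audio_values.shape)  # (batch, num_audio_channels, time, feature_size)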
| 715
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : int , A_ : Dict , A_ : List[str] , A_ : Optional[int] , A_ : Any=None , A_ : List[Any]=None )-> Dict:
__UpperCamelCase = start
__UpperCamelCase = end
__UpperCamelCase = val
__UpperCamelCase = (start + end) // 2
__UpperCamelCase = left
__UpperCamelCase = right
def __repr__( self : Dict )-> Tuple:
return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : Any , A_ : Sequence , A_ : List[str] )-> str:
__UpperCamelCase = collection
__UpperCamelCase = function
if self.collection:
__UpperCamelCase = self._build_tree(0 , len(A_ ) - 1 )
def A ( self : List[Any] , A_ : Union[str, Any] , A_ : Tuple )-> Tuple:
self._update_tree(self.root , A_ , A_ )
def A ( self : str , A_ : Union[str, Any] , A_ : Any )-> Optional[Any]:
return self._query_range(self.root , A_ , A_ )
def A ( self : int , A_ : Optional[Any] , A_ : Union[str, Any] )-> Optional[int]:
if start == end:
return SegmentTreeNode(A_ , A_ , self.collection[start] )
__UpperCamelCase = (start + end) // 2
__UpperCamelCase = self._build_tree(A_ , A_ )
__UpperCamelCase = self._build_tree(mid + 1 , A_ )
return SegmentTreeNode(A_ , A_ , self.fn(left.val , right.val ) , A_ , A_ )
def A ( self : str , A_ : Union[str, Any] , A_ : Optional[int] , A_ : Dict )-> List[str]:
if node.start == i and node.end == i:
__UpperCamelCase = val
return
if i <= node.mid:
self._update_tree(node.left , A_ , A_ )
else:
self._update_tree(node.right , A_ , A_ )
__UpperCamelCase = self.fn(node.left.val , node.right.val )
def A ( self : Tuple , A_ : Tuple , A_ : Any , A_ : Union[str, Any] )-> Union[str, Any]:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , A_ , A_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , A_ , node.mid ) , self._query_range(node.right , node.mid + 1 , A_ ) , )
else:
# range in right child tree
return self._query_range(node.right , A_ , A_ )
def A ( self : Any )-> str:
if self.root is not None:
__UpperCamelCase = Queue()
queue.put(self.root )
while not queue.empty():
__UpperCamelCase = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
_A = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
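    # --- Hedged addition (illustrative) ---
    # With fn=operator.add the tree answers inclusive range-sum queries in O(log n), e.g.
    # SegmentTree([2, 1, 5, 3, 4], operator.add).query_range(1, 3) evaluates to 1 + 5 + 3 = 9.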
| 228
| 0
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCamelCase__ ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.''' )
if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
else:
return _interleave_iterable_datasets(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , stopping_strategy=UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(UpperCamelCase__ ):
if not isinstance(UpperCamelCase__ , (Dataset, IterableDataset) ):
if isinstance(UpperCamelCase__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'is an empty dataset dictionary.' )
raise ValueError(
F'''Dataset at position {i} has at least one split: {list(UpperCamelCase__ )}\n'''
F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(UpperCamelCase__ ) )}\']''' )
raise ValueError(
F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCamelCase__ ).__name__}.''' )
if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
)
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(
F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
else:
return _concatenate_iterable_datasets(UpperCamelCase__ , info=UpperCamelCase__ , split=UpperCamelCase__ , axis=UpperCamelCase__ )
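# --- Hedged usage sketch (added for illustration; mirrors the public `datasets` API) ---
#
#     from datasets import Dataset, interleave_datasets
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                                 stopping_strategy="first_exhausted")
#     print(mixed["a"])  # rows drawn alternately from d1 and d2 until one is exhausted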
| 362
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class lowercase ( lowercase_ , lowercase_ ):
__SCREAMING_SNAKE_CASE : List[str] = '''resnet'''
__SCREAMING_SNAKE_CASE : Dict = ['''basic''', '''bottleneck''']
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : List[Any] = version.parse('''1.11''' )
@property
def a ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def a ( self ):
return 1e-3
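# --- Hedged usage sketch (added for illustration; mirrors the public `transformers` API) ---
#
#     from transformers import ResNetConfig, ResNetModel
#
#     config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512])
#     model = ResNetModel(config)  # randomly initialised ResNet-18-style backbone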
| 362
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["note_seq"]
def __init__( self :Any , *__A :Union[str, Any] , **__A :Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ["""note_seq"""] )
@classmethod
def _snake_case ( cls :Union[str, Any] , *__A :Optional[int] , **__A :Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""note_seq"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :str , **__A :Optional[int] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""note_seq"""] )
| 59
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowerCamelCase = data_utils.TransfoXLTokenizer
_lowerCamelCase = data_utils.TransfoXLCorpus
_lowerCamelCase = data_utils
_lowerCamelCase = data_utils
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Tuple ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , """rb""" ) as fp:
SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
SCREAMING_SNAKE_CASE__ = TransfoXLConfig()
else:
SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(f'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_lowerCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 59
| 1
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _snake_case :
def __init__( self : Any, __lowercase : Any, __lowercase : int=2, __lowercase : Tuple=32, __lowercase : List[Any]=16, __lowercase : Any=3, __lowercase : Dict=True, __lowercase : str=True, __lowercase : Any=32, __lowercase : Union[str, Any]=4, __lowercase : Optional[int]=[0, 1, 2, 3], __lowercase : Dict=4, __lowercase : Union[str, Any]=37, __lowercase : Any="gelu", __lowercase : List[Any]=0.1, __lowercase : int=0.1, __lowercase : str=0.02, __lowercase : List[str]=3, __lowercase : int=[1, 384, 24, 24], __lowercase : List[Any]=True, __lowercase : Any=None, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = backbone_out_indices
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = backbone_featmap_shape
lowercase__ = scope
lowercase__ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def A__ ( self : Optional[Any] ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self : List[Any] ):
lowercase__ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__lowercase, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=__lowercase, backbone_featmap_shape=self.backbone_featmap_shape, )
def A__ ( self : Optional[int], __lowercase : Optional[Any], __lowercase : str, __lowercase : Any ):
lowercase__ = DPTModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Optional[int], __lowercase : Dict, __lowercase : Optional[Any], __lowercase : int ):
lowercase__ = self.num_labels
lowercase__ = DPTForDepthEstimation(__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase )
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) )
def A__ ( self : Dict, __lowercase : Optional[int], __lowercase : List[str], __lowercase : Any ):
lowercase__ = self.num_labels
lowercase__ = DPTForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
lowercase__ = model(__lowercase, labels=__lowercase )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A__ ( self : Optional[int] ):
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : Union[str, Any] =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase__ : int =(
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Union[str, Any] =False
UpperCamelCase__ : Optional[int] =False
UpperCamelCase__ : Dict =False
def A__ ( self : Union[str, Any] ):
lowercase__ = DPTModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37 )
def A__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A__ ( self : Optional[int] ):
pass
def A__ ( self : str ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase, nn.Linear ) )
def A__ ( self : Dict ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
def A__ ( self : Any ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self : Tuple ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowercase )
def A__ ( self : Union[str, Any] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
def A__ ( self : Tuple ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
if model_class in get_values(__lowercase ):
continue
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.train()
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = model(**__lowercase ).loss
loss.backward()
def A__ ( self : Optional[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = False
lowercase__ = True
if model_class in get_values(__lowercase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase__ = model_class(__lowercase )
model.to(__lowercase )
model.gradient_checkpointing_enable()
model.train()
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = model(**__lowercase ).loss
loss.backward()
def A__ ( self : Tuple ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(__lowercase )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=__lowercase )
# Skip the check for the backbone
lowercase__ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self : Optional[int] ):
pass
@slow
def A__ ( self : str ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase__ = DPTModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class _snake_case ( unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 413
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
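# Mock SageMaker launch config used below to exercise `_convert_nargs_to_dict` on both well-formed and malformed script args.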
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 413
| 1
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case : Any = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    # Draw a batch of random token ids used as dummy input for the speed/memory benchmarks.
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def lowerCamelCase__( self :Any ) -> List[str]:
return tf.__version__
def lowerCamelCase__( self :List[str] ,__snake_case :List[str] ,__snake_case :Optional[int] ,__snake_case :Optional[Any] ) -> List[str]:
# initialize GPU on separate process
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_inference_func(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
return self._measure_speed(_inference )
def lowerCamelCase__( self :List[str] ,__snake_case :Dict ,__snake_case :str ,__snake_case :Tuple ) -> Any:
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_train_func(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
return self._measure_speed(_train )
def lowerCamelCase__( self :List[Any] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Tuple ) -> str:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,UpperCamelCase__ )
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_inference_func(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
return self._measure_memory(_inference )
def lowerCamelCase__( self :Tuple ,__snake_case :Tuple ,__snake_case :Union[str, Any] ,__snake_case :Dict ) -> List[str]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,UpperCamelCase__ )
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_train_func(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
return self._measure_memory(_train )
def lowerCamelCase__( self :Tuple ,__snake_case :str ,__snake_case :Optional[Any] ,__snake_case :int ) -> Union[str, Any]:
a__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
a__ = (
hasattr(UpperCamelCase__ ,'architectures' )
and isinstance(config.architectures ,UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a__ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
a__ = __import__('transformers' ,fromlist=[model_class] )
a__ = getattr(UpperCamelCase__ ,UpperCamelCase__ )
a__ = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
a__ = TF_MODEL_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
a__ = config.vocab_size if hasattr(UpperCamelCase__ ,'vocab_size' ) else config.encoder.vocab_size
a__ = random_input_ids(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase__ ,decoder_input_ids=UpperCamelCase__ ,training=UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase__ ,training=UpperCamelCase__ )
a__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ,__snake_case :Tuple ) -> List[str]:
a__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
a__ = (
hasattr(UpperCamelCase__ ,'architectures' )
and isinstance(config.architectures ,UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a__ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
a__ = __import__('transformers' ,fromlist=[model_class] )
a__ = getattr(UpperCamelCase__ ,UpperCamelCase__ )
a__ = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
a__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
a__ = config.vocab_size if hasattr(UpperCamelCase__ ,'vocab_size' ) else config.encoder.vocab_size
a__ = random_input_ids(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_train():
a__ = model(UpperCamelCase__ ,decoder_input_ids=UpperCamelCase__ ,labels=UpperCamelCase__ ,training=UpperCamelCase__ )[0]
a__ = tf.gradients(UpperCamelCase__ ,model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_train():
a__ = model(UpperCamelCase__ ,labels=UpperCamelCase__ ,training=UpperCamelCase__ )[0]
a__ = tf.gradients(UpperCamelCase__ ,model.trainable_variables )
return gradients
a__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase__( self :Any ,__snake_case :Tuple ) -> List[str]:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(UpperCamelCase__ ,repeat=1 ,number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
a__ = timeit.repeat(
UpperCamelCase__ ,repeat=self.args.repeat ,number=10 ,)
return min(UpperCamelCase__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def lowerCamelCase__( self :str ,__snake_case :Optional[Any] ) -> Tuple:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
a__ = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
a__ = '''N/A'''
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
a__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
a__ = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase__ )
a__ = meminfo.used
a__ = Memory(UpperCamelCase__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
a__ = None
else:
a__ = measure_peak_memory_cpu(UpperCamelCase__ )
a__ = Memory(UpperCamelCase__ ) if isinstance(UpperCamelCase__ ,UpperCamelCase__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
a__ = stop_memory_tracing(UpperCamelCase__ )
if memory is None:
a__ = summary.total
else:
a__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 711
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Each term of the Chudnovsky series adds roughly 14 correct digits.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 657
| 0
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __UpperCAmelCase( self ):
__A : List[str] = SMALL_MODEL_IDENTIFIER
__A : Tuple = "pt"
__A : int = "tf"
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Optional[int] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Any = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
model_tf.save_pretrained(__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : List[str] = "mock_framework"
# Framework provided - return whatever the user provides
__A : Tuple = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
__A : List[str] = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
__A : Optional[Any] = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __UpperCAmelCase( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
__A : Dict = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
__A : List[str] = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCAmelCase ):
__A : Optional[int] = FeaturesManager.determine_framework(__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Tuple = MagicMock(return_value=__UpperCAmelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ):
__A : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__A : Tuple = MagicMock(return_value=__UpperCAmelCase )
with patch("transformers.onnx.features.is_torch_available" , __UpperCAmelCase ):
__A : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
__A : Tuple = MagicMock(return_value=__UpperCAmelCase )
__A : Optional[Any] = MagicMock(return_value=__UpperCAmelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ), patch(
"transformers.onnx.features.is_torch_available" , __UpperCAmelCase ):
__A : List[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
__A : Any = MagicMock(return_value=__UpperCAmelCase )
__A : Optional[int] = MagicMock(return_value=__UpperCAmelCase )
with patch("transformers.onnx.features.is_tf_available" , __UpperCAmelCase ), patch(
"transformers.onnx.features.is_torch_available" , __UpperCAmelCase ):
with self.assertRaises(__UpperCAmelCase ):
__A : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 520
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p not exceeding base^degree with a two-pointer sweep over the primes."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 520
| 1
|
def solution(length: int = 50) -> int:
    # Dynamic programme: ways to fill a row of `row_length` units with at least one
    # coloured tile of length 2, 3 or 4 (one colour per column of the table).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F'{solution() = }')
| 37
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 37
| 1
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Validate node types and data first, then check the BST ordering property recursively."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
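# Pin RNG seeds and force deterministic CUDA/cuDNN kernels so the hard-coded output slices below stay reproducible.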
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = StableDiffusionDiffEditPipeline
_a = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
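    # The dummy components below are deliberately tiny (32-dim UNet/VAE/CLIP) so the pipeline tests run quickly on CPU.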
def A__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase , )
_lowercase =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
_lowercase =DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_zero=lowerCAmelCase , )
torch.manual_seed(0 )
_lowercase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowercase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
_lowercase =CLIPTextModel(lowerCAmelCase )
_lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowercase ={
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Tuple:
'''simple docstring'''
_lowercase =floats_tensor((1, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
'''simple docstring'''
_lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
_lowercase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowercase =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
_lowercase =torch.manual_seed(lowerCAmelCase )
else:
_lowercase =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
_lowercase ={
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def A__ ( self ) -> Any:
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe(**lowerCAmelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase )
_lowercase =self.pipeline_class.from_pretrained(lowerCAmelCase )
pipe_loaded.to(lowerCAmelCase )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
_lowercase =self.get_dummy_inputs(lowerCAmelCase )
_lowercase =pipe_loaded(**lowerCAmelCase )[0]
_lowercase =np.abs(output - output_loaded ).max()
self.assertLess(lowerCAmelCase , 1e-4 )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_mask_inputs(lowerCAmelCase )
_lowercase =pipe.generate_mask(**lowerCAmelCase )
_lowercase =mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
_lowercase =np.array([0] * 9 )
_lowercase =np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase ='cpu'
_lowercase =self.get_dummy_components()
_lowercase ={'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
_lowercase =DPMSolverMultistepScheduler(**lowerCAmelCase )
_lowercase =DPMSolverMultistepInverseScheduler(**lowerCAmelCase )
_lowercase =self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase =self.get_dummy_inversion_inputs(lowerCAmelCase )
_lowercase =pipe.invert(**lowerCAmelCase ).images
_lowercase =image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
_lowercase =np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
_lowercase =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase , 1e-3 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A__ ( cls ) -> int:
'''simple docstring'''
_lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
_lowercase =raw_image.convert('RGB' ).resize((768, 768) )
_lowercase =raw_image
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase =DDIMScheduler.from_config(pipe.scheduler.config )
_lowercase =DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
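        # DiffEdit proceeds in three stages: derive an edit mask from the source/target prompt pair,
        # invert the image into latents, then inpaint the masked region with the target prompt.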
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =torch.manual_seed(0 )
_lowercase =StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowercase =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
_lowercase ='a bowl of fruit'
_lowercase ='a bowl of pears'
_lowercase =pipe.generate_mask(
image=self.raw_image , source_prompt=lowerCAmelCase , target_prompt=lowerCAmelCase , generator=lowerCAmelCase , )
_lowercase =pipe.invert(
prompt=lowerCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=lowerCAmelCase , num_inference_steps=25 , ).latents
_lowercase =pipe(
prompt=lowerCAmelCase , mask_image=lowerCAmelCase , image_latents=lowerCAmelCase , generator=lowerCAmelCase , negative_prompt=lowerCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
_lowercase =(
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 291
| 1
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond (a pyramid of stars)."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond (an inverted pyramid of stars)."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive sizes."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('''Good Bye...''')
| 156
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
lowerCAmelCase_ : int = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
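# Keys listed above live at the top level of the HF checkpoint, so they are not prefixed with "wav2vec2." during renaming.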
def __A ( lowerCAmelCase_ ):
_UpperCAmelCase : Tuple = {}
with open(lowerCAmelCase_ , """r""" ) as file:
for line_number, line in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : List[Any] = line.strip()
if line:
_UpperCAmelCase : Any = line.split()
_UpperCAmelCase : Union[str, Any] = line_number
_UpperCAmelCase : Optional[Any] = words[0]
_UpperCAmelCase : str = value
return result
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for attribute in key.split(""".""" ):
_UpperCAmelCase : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_UpperCAmelCase : List[Any] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_UpperCAmelCase : str = """param"""
if weight_type is not None and weight_type != "param":
_UpperCAmelCase : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase : Tuple = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_UpperCAmelCase : Any = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : str = shape_pointer.shape
# let's reduce dimension
_UpperCAmelCase : Union[str, Any] = value[0]
else:
_UpperCAmelCase : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase : int = value
elif weight_type == "weight_g":
_UpperCAmelCase : Optional[int] = value
elif weight_type == "weight_v":
_UpperCAmelCase : Tuple = value
elif weight_type == "bias":
_UpperCAmelCase : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_UpperCAmelCase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Tuple = value
else:
_UpperCAmelCase : Optional[int] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase_ ):
_UpperCAmelCase : Dict = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_UpperCAmelCase : int = """param"""
if weight_type is not None and weight_type != "param":
_UpperCAmelCase : Dict = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_UpperCAmelCase : Union[str, Any] = """.""".join([key, hf_param_name] )
else:
_UpperCAmelCase : Any = key
_UpperCAmelCase : int = value if """lm_head""" in full_key else value[0]
lowerCAmelCase_ : Optional[Any] = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
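# Adapter parameter names (W_a, b_a, ...) map onto the two Linear layers and the LayerNorm of each HF adapter module.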
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ):
_UpperCAmelCase : List[str] = False
for key, mapped_key in MAPPING.items():
_UpperCAmelCase : Union[str, Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
_UpperCAmelCase : List[str] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2]
_UpperCAmelCase : Tuple = mapped_key.replace("""*""" , lowerCAmelCase_ )
if "weight_g" in name:
_UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
_UpperCAmelCase : Union[str, Any] = """weight_v"""
elif "bias" in name:
_UpperCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase : Optional[int] = """weight"""
else:
_UpperCAmelCase : Dict = None
if hf_dict is not None:
rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return is_used
return is_used
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = fairseq_model.state_dict()
_UpperCAmelCase : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
_UpperCAmelCase : List[str] = True
else:
_UpperCAmelCase : List[Any] = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Tuple = full_name.split("""conv_layers.""" )[-1]
_UpperCAmelCase : str = name.split(""".""" )
_UpperCAmelCase : Optional[Any] = int(items[0] )
_UpperCAmelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase : Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase : Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False ):
if config_path is not None:
_UpperCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
else:
_UpperCAmelCase : Optional[int] = WavaVecaConfig()
if is_seq_class:
_UpperCAmelCase : Any = read_txt_into_dict(lowerCAmelCase_ )
_UpperCAmelCase : int = idalabel
_UpperCAmelCase : Optional[int] = WavaVecaForSequenceClassification(lowerCAmelCase_ )
_UpperCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
feature_extractor.save_pretrained(lowerCAmelCase_ )
elif is_finetuned:
if dict_path:
_UpperCAmelCase : int = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase : Union[str, Any] = target_dict.pad_index
_UpperCAmelCase : Dict = target_dict.bos_index
_UpperCAmelCase : Dict = target_dict.eos_index
_UpperCAmelCase : Tuple = len(target_dict.symbols )
_UpperCAmelCase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = 1
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Any = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
_UpperCAmelCase : Tuple = True if config.feat_extract_norm == """layer""" else False
_UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
_UpperCAmelCase : Tuple = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = WavaVecaForCTC(lowerCAmelCase_ )
else:
_UpperCAmelCase : Union[str, Any] = WavaVecaForPreTraining(lowerCAmelCase_ )
if is_finetuned or is_seq_class:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_UpperCAmelCase : int = argparse.Namespace(task="""audio_pretraining""" )
_UpperCAmelCase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
_UpperCAmelCase : Tuple = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
lowerCAmelCase_ : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 156
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
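# Lazy import structure: framework-specific submodules are only loaded when their symbols are first accessed,
# which keeps the top-level package import cheap.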
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 113
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
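# File contents are normalised with the whitespace pattern below before hashing, so files that differ only in formatting share a hash.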
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is auto-generated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file,
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test and keyword-free files are only dropped with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
lowercase : Union[str, Any] = HfArgumentParser(PreprocessingArguments)
lowercase : List[Any] = parser.parse_args()
if args.num_workers is None:
lowercase : str = multiprocessing.cpu_count()
lowercase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowercase : Any = time.time()
lowercase : Dict = load_dataset(args.dataset_name, split='''train''')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
lowercase : str = time.time()
lowercase : List[Any] = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
lowercase : Optional[int] = set(ds.unique('''hash'''))
lowercase : List[str] = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
lowercase : str = time.time()
lowercase : List[str] = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
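# Near-deduplication (sketch of what the call below is assumed to do): MinHash
# signatures are computed per document, documents whose estimated Jaccard
# similarity exceeds the threshold are clustered, and only a representative
# subset of each cluster is kept; the cluster metadata is saved further down.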
if args.near_deduplication:
lowercase : Dict = time.time()
lowercase ,lowercase : Any = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
lowercase : Dict = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowercase : str = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowercase : Optional[int] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowercase : Optional[int] = str(data_dir / F"""file-{file_number+1:012}.json""")
lowercase : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 568
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__UpperCamelCase : int = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 719
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Union[str, Any] ) -> List[str]:
"""simple docstring"""
__a = botoa.client('iam' )
__a = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=SCREAMING_SNAKE_CASE__, AssumeRolePolicyDocument=json.dumps(SCREAMING_SNAKE_CASE__, indent=2 ) )
__a = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=SCREAMING_SNAKE_CASE__, PolicyName=f"""{role_name}_policy_permission""", PolicyDocument=json.dumps(SCREAMING_SNAKE_CASE__, indent=2 ), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"""role {role_name} already exists. Using existing one""" )
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[str] ) -> Optional[Any]:
"""simple docstring"""
__a = botoa.client('iam' )
return iam_client.get_role(RoleName=SCREAMING_SNAKE_CASE__ )["Role"]["Arn"]
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
__a = _ask_options(
'How do you want to authorize?', ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '], SCREAMING_SNAKE_CASE__, )
__a = None
if credentials_configuration == 0:
__a = _ask_field('Enter your AWS Profile name: [default] ', default='default' )
__a = aws_profile
else:
print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
__a = _ask_field('AWS Access Key ID: ' )
__a = aws_access_key_id
__a = _ask_field('AWS Secret Access Key: ' )
__a = aws_secret_access_key
__a = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1' )
__a = aws_region
__a = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?', ['Provide IAM Role name', 'Create new IAM role using credentials'], SCREAMING_SNAKE_CASE__, )
if role_management == 0:
__a = _ask_field('Enter your IAM role name: ' )
else:
__a = 'accelerate_sagemaker_execution_role'
print(f"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
_create_iam_role_for_sagemaker(SCREAMING_SNAKE_CASE__ )
__a = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
__a = None
if is_custom_docker_image:
__a = _ask_field('Enter your Docker image: ', lambda SCREAMING_SNAKE_CASE__ : str(SCREAMING_SNAKE_CASE__ ).lower() )
__a = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
__a = None
if is_sagemaker_inputs_enabled:
__a = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda SCREAMING_SNAKE_CASE__ : str(SCREAMING_SNAKE_CASE__ ).lower(), )
__a = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
__a = None
if is_sagemaker_metrics_enabled:
__a = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda SCREAMING_SNAKE_CASE__ : str(SCREAMING_SNAKE_CASE__ ).lower(), )
__a = _ask_options(
'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode, )
__a = {}
__a = _ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
if use_dynamo:
__a = 'dynamo_'
__a = _ask_options(
'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
__a = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
if use_custom_options:
__a = _ask_options(
'Which mode do you want to use?', SCREAMING_SNAKE_CASE__, lambda SCREAMING_SNAKE_CASE__ : TORCH_DYNAMO_MODES[int(SCREAMING_SNAKE_CASE__ )], default='default', )
__a = _ask_field(
                'Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
__a = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=SCREAMING_SNAKE_CASE__, error_message='Please enter yes or no.', )
    __a = 'Which EC2 instance type do you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__a = _ask_options(
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, lambda SCREAMING_SNAKE_CASE__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(SCREAMING_SNAKE_CASE__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__a = _ask_field(SCREAMING_SNAKE_CASE__, lambda SCREAMING_SNAKE_CASE__ : str(SCREAMING_SNAKE_CASE__ ).lower(), default='ml.p3.2xlarge' )
__a = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__a = _ask_field(
            'How many machines do you want to use? [1]: ', SCREAMING_SNAKE_CASE__, default=1, )
__a = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision, )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=SCREAMING_SNAKE_CASE__, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=SCREAMING_SNAKE_CASE__, use_cpu=SCREAMING_SNAKE_CASE__, dynamo_config=SCREAMING_SNAKE_CASE__, eca_instance_type=SCREAMING_SNAKE_CASE__, profile=SCREAMING_SNAKE_CASE__, region=SCREAMING_SNAKE_CASE__, iam_role_name=SCREAMING_SNAKE_CASE__, mixed_precision=SCREAMING_SNAKE_CASE__, num_machines=SCREAMING_SNAKE_CASE__, sagemaker_inputs_file=SCREAMING_SNAKE_CASE__, sagemaker_metrics_file=SCREAMING_SNAKE_CASE__, )
| 270
| 0
|
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : int | str ):
a__ = str(__lowerCAmelCase )
return n == n[::-1]
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for i in range(1 , __lowerCAmelCase ):
if is_palindrome(__lowerCAmelCase ) and is_palindrome(bin(__lowerCAmelCase ).split('b' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 335
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case : Tuple = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
snake_case : Tuple = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __lowercase ( ):
a__ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
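    # bs starts with the printable latin byte values; every byte not in this list is
    # remapped below to an unused code point (256 + n), so no raw control or
    # whitespace bytes appear as keys of the resulting byte-to-unicode table.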
a__ = bs[:]
a__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCAmelCase )
cs.append(2**8 + n )
n += 1
a__ = [chr(__lowerCAmelCase ) for n in cs]
return dict(zip(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : List[Any] ):
a__ = set()
a__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a__ = char
return pairs
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = ['''input_ids''', '''attention_mask''']
def __init__( self :Dict ,__snake_case :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Tuple="replace" ,__snake_case :Tuple="<s>" ,__snake_case :Tuple="</s>" ,__snake_case :Dict="</s>" ,__snake_case :Any="<s>" ,__snake_case :List[str]="<unk>" ,__snake_case :Tuple="<pad>" ,__snake_case :Dict="<mask>" ,__snake_case :Dict=False ,**__snake_case :Union[str, Any] ,) -> Optional[Any]:
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else bos_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else eos_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else sep_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else cls_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else unk_token
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else mask_token
super().__init__(
errors=__snake_case ,bos_token=__snake_case ,eos_token=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,cls_token=__snake_case ,pad_token=__snake_case ,mask_token=__snake_case ,add_prefix_space=__snake_case ,**__snake_case ,)
with open(__snake_case ,encoding='utf-8' ) as vocab_handle:
a__ = json.load(__snake_case )
a__ = {v: k for k, v in self.encoder.items()}
a__ = errors # how to handle errors in decoding
a__ = bytes_to_unicode()
a__ = {v: k for k, v in self.byte_encoder.items()}
with open(__snake_case ,encoding='utf-8' ) as merges_handle:
a__ = merges_handle.read().split('\n' )[1:-1]
a__ = [tuple(merge.split() ) for merge in bpe_merges]
a__ = dict(zip(__snake_case ,range(len(__snake_case ) ) ) )
a__ = {}
a__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
return len(self.encoder )
def lowerCamelCase__( self :List[Any] ) -> str:
return dict(self.encoder ,**self.added_tokens_encoder )
def lowerCamelCase__( self :List[str] ,__snake_case :int ) -> List[Any]:
if token in self.cache:
return self.cache[token]
a__ = tuple(__snake_case )
a__ = get_pairs(__snake_case )
if not pairs:
return token
while True:
a__ = min(__snake_case ,key=lambda __snake_case : self.bpe_ranks.get(__snake_case ,float('inf' ) ) )
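            # Greedily pick the adjacent symbol pair with the lowest merge rank;
            # pairs that never appear in the merges file get rank inf, and the loop
            # stops as soon as the best pair has no recorded merge.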
if bigram not in self.bpe_ranks:
break
a__ , a__ = bigram
a__ = []
a__ = 0
while i < len(__snake_case ):
try:
a__ = word.index(__snake_case ,__snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ = tuple(__snake_case )
a__ = new_word
if len(__snake_case ) == 1:
break
else:
a__ = get_pairs(__snake_case )
a__ = ' '.join(__snake_case )
a__ = word
return word
def lowerCamelCase__( self :List[Any] ,__snake_case :Tuple ) -> Any:
a__ = []
for token in re.findall(self.pat ,__snake_case ):
a__ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(' ' ) )
return bpe_tokens
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Tuple ) -> Tuple:
return self.encoder.get(__snake_case ,self.encoder.get(self.unk_token ) )
def lowerCamelCase__( self :str ,__snake_case :str ) -> Tuple:
return self.decoder.get(__snake_case )
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ) -> Any:
a__ = ''.join(__snake_case )
a__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors )
return text
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__snake_case ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__snake_case ,ensure_ascii=__snake_case ) + '\n' )
a__ = 0
with open(__snake_case ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
a__ = token_index
writer.write(' '.join(__snake_case ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a__ = [self.cls_token_id]
a__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__( self :Tuple ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ,__snake_case :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case ,token_ids_a=__snake_case ,already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def lowerCamelCase__( self :int ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__( self :List[Any] ,__snake_case :List[str] ,__snake_case :Dict=False ,**__snake_case :Optional[Any] ) -> Tuple:
a__ = kwargs.pop('add_prefix_space' ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()):
a__ = ' ' + text
return (text, kwargs)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Union[Dict[str, EncodedInput], BatchEncoding] ,__snake_case :Optional[int] = None ,__snake_case :PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,__snake_case :Optional[int] = None ,__snake_case :Optional[bool] = None ,) -> dict:
a__ = super()._pad(
encoded_inputs=__snake_case ,max_length=__snake_case ,padding_strategy=__snake_case ,pad_to_multiple_of=__snake_case ,return_attention_mask=__snake_case ,)
# Load from model defaults
if return_attention_mask is None:
a__ = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
a__ = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
a__ = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a__ = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a__ = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 335
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def A_( A : Optional[Any] , A : List[str]):
UpperCamelCase = u
for i in range(1 , _lowercase):
UpperCamelCase = temp * (u - i)
return temp
def A_( ):
    UpperCamelCase = int(input('enter the number of values: '))
UpperCamelCase = []
for _ in range(_lowercase):
y.append([])
for i in range(_lowercase):
for j in range(_lowercase):
y[i].append(_lowercase)
UpperCamelCase = 0
print('enter the values of parameters in a list: ')
UpperCamelCase = list(map(_lowercase , input().split()))
print('enter the values of corresponding parameters: ')
for i in range(_lowercase):
UpperCamelCase = float(input())
UpperCamelCase = int(input('enter the value to interpolate: '))
UpperCamelCase = (value - x[0]) / (x[1] - x[0])
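    # u is the normalized offset (value - x[0]) / h, where h = x[1] - x[0] is the
    # (assumed uniform) spacing of the tabulated points; Newton's forward-difference
    # formula below accumulates u(u-1)...(u-i+1)/i! times the i-th forward difference.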
# for calculating forward difference table
for i in range(1 , _lowercase):
for j in range(n - i):
UpperCamelCase = y[j + 1][i - 1] - y[j][i - 1]
UpperCamelCase = y[0][0]
for i in range(1 , _lowercase):
summ += (ucal(_lowercase , _lowercase) * y[0][i]) / math.factorial(_lowercase)
print(f'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
| 704
|
'''simple docstring'''
import argparse
import copy
def A_( A : Optional[int]):
UpperCamelCase = {}
with open(A) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
UpperCamelCase = []
_list.append([line.split()[1], line.split()[2]])
UpperCamelCase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]])
if line.split()[1] not in dict_of_neighbours:
UpperCamelCase = []
_list.append([line.split()[0], line.split()[2]])
UpperCamelCase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]])
return dict_of_neighbours
def A_( A : Union[str, Any] , A : str):
with open(A) as f:
UpperCamelCase = f.read(1)
UpperCamelCase = start_node
UpperCamelCase = []
UpperCamelCase = start_node
UpperCamelCase = 0
while visiting not in first_solution:
UpperCamelCase = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1]) < int(A) and k[0] not in first_solution:
UpperCamelCase = k[1]
UpperCamelCase = k[0]
first_solution.append(A)
UpperCamelCase = distance_of_first_solution + int(A)
UpperCamelCase = best_node
first_solution.append(A)
UpperCamelCase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
UpperCamelCase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1])
- 1_0000
)
return first_solution, distance_of_first_solution
def A_( A : List[Any] , A : str):
UpperCamelCase = []
for n in solution[1:-1]:
UpperCamelCase = solution.index(A)
for kn in solution[1:-1]:
UpperCamelCase = solution.index(A)
if n == kn:
continue
UpperCamelCase = copy.deepcopy(A)
UpperCamelCase = kn
UpperCamelCase = n
UpperCamelCase = 0
for k in _tmp[:-1]:
UpperCamelCase = _tmp[_tmp.index(A) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
UpperCamelCase = distance + int(i[1])
_tmp.append(A)
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp)
UpperCamelCase = len(neighborhood_of_solution[0]) - 1
neighborhood_of_solution.sort(key=lambda A: x[index_of_last_item_in_the_list])
return neighborhood_of_solution
def A_( A : List[str] , A : Any , A : Any , A : List[Any] , A : Any):
UpperCamelCase = 1
UpperCamelCase = first_solution
UpperCamelCase = []
UpperCamelCase = distance_of_first_solution
UpperCamelCase = solution
while count <= iters:
UpperCamelCase = find_neighborhood(A , A)
UpperCamelCase = 0
UpperCamelCase = neighborhood[index_of_best_solution]
UpperCamelCase = len(A) - 1
UpperCamelCase = False
while not found:
UpperCamelCase = 0
while i < len(A):
if best_solution[i] != solution[i]:
UpperCamelCase = best_solution[i]
UpperCamelCase = solution[i]
break
UpperCamelCase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node])
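                # Remember this exchange in the tabu list so it is not immediately
                # undone; once the list reaches its size limit the oldest entry is
                # evicted via pop(0) further down.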
UpperCamelCase = True
UpperCamelCase = best_solution[:-1]
UpperCamelCase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
UpperCamelCase = cost
UpperCamelCase = solution
else:
UpperCamelCase = index_of_best_solution + 1
UpperCamelCase = neighborhood[index_of_best_solution]
if len(A) >= size:
tabu_list.pop(0)
UpperCamelCase = count + 1
return best_solution_ever, best_cost
def A_( A : Optional[Any]=None):
UpperCamelCase = generate_neighbours(args.File)
UpperCamelCase , UpperCamelCase = generate_first_solution(
args.File , A)
UpperCamelCase , UpperCamelCase = tabu_search(
A , A , A , args.Iterations , args.Size , )
print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 432
| 0
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=16 , __snake_case=2 , __snake_case=0.02 , __snake_case=3 , __snake_case=4 , __snake_case=None , ) -> List[Any]:
'''simple docstring'''
__a =parent
__a =batch_size
__a =seq_length
__a =is_training
__a =use_input_mask
__a =use_token_type_ids
__a =use_labels
__a =vocab_size
__a =hidden_size
__a =num_hidden_layers
__a =num_attention_heads
__a =intermediate_size
__a =hidden_act
__a =hidden_dropout_prob
__a =attention_probs_dropout_prob
__a =max_position_embeddings
__a =type_vocab_size
__a =type_sequence_label_size
__a =initializer_range
__a =num_labels
__a =num_choices
__a =scope
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a =None
if self.use_input_mask:
__a =random_attention_mask([self.batch_size, self.seq_length] )
__a =None
if self.use_token_type_ids:
__a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a =None
__a =None
__a =None
if self.use_labels:
__a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a =ids_tensor([self.batch_size] , self.num_choices )
__a =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =NystromformerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__a =model(__snake_case , token_type_ids=__snake_case )
__a =model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
__a =NystromformerForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> List[Any]:
'''simple docstring'''
__a =NystromformerForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a =self.num_labels
__a =NystromformerForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
__a =self.num_labels
__a =NystromformerForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> Any:
'''simple docstring'''
__a =self.num_choices
__a =NystromformerForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
__a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a =model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.prepare_config_and_inputs()
        (
            __a,
            __a,
            __a,
            __a,
            __a,
            __a,
            __a,
        ) =config_and_inputs
__a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =NystromformerModelTester(self )
__a =ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a =type
self.model_tester.create_and_check_model(*__snake_case )
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a =NystromformerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
__a =torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
__a =model(__snake_case )[0]
__a =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __snake_case )
__a =torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
def __magic_name__ ( self ) -> int:
'''simple docstring'''
__a ='the [MASK] of Belgium is Brussels'
__a =AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
__a =NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
__a =tokenizer(__snake_case , return_tensors='pt' )
with torch.no_grad():
__a =model(encoding.input_ids ).logits
__a =token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__snake_case ) , 'capital' )
| 242
|
import torch
from transformers import AutoModel
class __magic_name__ ( torch.nn.Module ):
def __init__( self , __snake_case="sayef/fsner-bert-base-uncased" ) -> str:
'''simple docstring'''
super(__snake_case , self ).__init__()
__a =AutoModel.from_pretrained(__snake_case , return_dict=__snake_case )
__a =torch.nn.CosineSimilarity(3 , 1e-08 )
__a =torch.nn.Softmax(dim=1 )
def __magic_name__ ( self , **__snake_case ) -> Tuple:
'''simple docstring'''
return self.bert(**__snake_case ).last_hidden_state
def __magic_name__ ( self , __snake_case ) -> Any:
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=1 ) -> List[str]:
'''simple docstring'''
return self.softmax(T * self.cos(__snake_case , __snake_case ) )
def __magic_name__ ( self , __snake_case , __snake_case ) -> str:
'''simple docstring'''
__a =W_supports['sizes'].tolist()
__a =W_supports['start_token_id'].item()
__a =W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__a =self.BERT(**__snake_case )
__a =self.BERT(**__snake_case )
__a =None
__a =None
__a =W_supports['input_ids'] == start_token_id
__a =W_supports['input_ids'] == end_token_id
for i, size in enumerate(__snake_case ):
if i == 0:
__a =0
else:
__a =support_sizes[i - 1]
__a =S[s : s + size][start_token_masks[s : s + size]]
__a =S[s : s + size][end_token_masks[s : s + size]]
__a =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__a =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
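            # Score every query token against the start/end token embeddings of the
            # current support chunk (dot products summed over support tokens), then
            # softmax over the query sequence to get start/end position distributions.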
if p_starts is not None:
__a =torch.vstack((p_starts, p_start) )
__a =torch.vstack((p_ends, p_end) )
else:
__a =p_start
__a =p_end
return p_starts, p_ends
| 242
| 1
|
'''simple docstring'''
from collections import defaultdict
def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =first_str.lower().strip()
SCREAMING_SNAKE_CASE__ : Optional[Any] =second_str.lower().strip()
# Remove whitespace
SCREAMING_SNAKE_CASE__ : Tuple =first_str.replace(''' ''', '''''' )
SCREAMING_SNAKE_CASE__ : List[str] =second_str.replace(''' ''', '''''' )
# Strings of different lengths are not anagrams
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
return False
# Default values for count should be 0
SCREAMING_SNAKE_CASE__ : defaultdict[str, int] =defaultdict(UpperCamelCase__ )
    # For each character in the input strings, increment the count for the first
    # string and decrement it for the second
for i in range(len(UpperCamelCase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
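        # e.g. for "listen" / "silent" every character's net count returns to zero,
        # so the all() check below succeeds; any mismatch leaves a non-zero entry.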
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
a_ = input('Enter the first string ').strip()
a_ = input('Enter the second string ').strip()
a_ = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
| 708
|
'''simple docstring'''
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
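        # The probe index linearly interpolates between the endpoint values, e.g.
        # searching 67 in [10, 30, 40, 45, 50, 66, 77, 93] first probes
        # 0 + (67 - 10) * (7 - 0) // (93 - 10) = 4.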
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =left
SCREAMING_SNAKE_CASE__ : Optional[Any] =point
elif point > right:
SCREAMING_SNAKE_CASE__ : Optional[int] =right
SCREAMING_SNAKE_CASE__ : Tuple =point
else:
if item < current_item:
SCREAMING_SNAKE_CASE__ : str =point - 1
else:
SCREAMING_SNAKE_CASE__ : Tuple =point + 1
return None
def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Dict ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
        raise ValueError('''Collection must be sorted in ascending order''' )
return True
if __name__ == "__main__":
import sys
a_ = 0
if debug == 1:
a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
a_ = 6_7
a_ = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
| 665
| 0
|
"""simple docstring"""
import random
def UpperCAmelCase ( snake_case : int , snake_case : float , snake_case : bool = False ):
_lowerCAmelCase:dict = {i: [] for i in range(snake_case )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(snake_case )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes i < j, add an edge from i to j
    # if the randomly generated number is below the given probability
for i in range(snake_case ):
for j in range(i + 1 , snake_case ):
if random.random() < probability:
graph[i].append(snake_case )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(snake_case )
return graph
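# Example: with 4 nodes and probability 0.5, a possible undirected result of the
# generator above is {0: [1, 3], 1: [0], 2: [], 3: [0]}; the exact edges depend on
# the random seed.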
def UpperCAmelCase ( snake_case : int ):
return {
i: [j for j in range(snake_case ) if i != j] for i in range(snake_case )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {}
class a__ ( UpperCamelCase_ ):
snake_case__ = '''llama'''
snake_case__ = ['''past_key_values''']
def __init__( self : str ,a__ : Union[str, Any]=3_2000 ,a__ : Any=4096 ,a__ : int=1_1008 ,a__ : int=32 ,a__ : Optional[Any]=32 ,a__ : List[Any]=None ,a__ : List[Any]="silu" ,a__ : Union[str, Any]=2048 ,a__ : Any=0.02 ,a__ : Any=1E-6 ,a__ : int=True ,a__ : Optional[int]=0 ,a__ : Any=1 ,a__ : Any=2 ,a__ : str=1 ,a__ : str=False ,a__ : Union[str, Any]=None ,**a__ : List[Any] ,) -> str:
"""simple docstring"""
_lowerCAmelCase:Tuple = vocab_size
_lowerCAmelCase:Optional[int] = max_position_embeddings
_lowerCAmelCase:int = hidden_size
_lowerCAmelCase:Dict = intermediate_size
_lowerCAmelCase:List[Any] = num_hidden_layers
_lowerCAmelCase:List[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase:List[Any] = num_attention_heads
_lowerCAmelCase:Any = num_key_value_heads
_lowerCAmelCase:Union[str, Any] = hidden_act
_lowerCAmelCase:int = initializer_range
_lowerCAmelCase:Any = rms_norm_eps
_lowerCAmelCase:Optional[Any] = pretraining_tp
_lowerCAmelCase:str = use_cache
_lowerCAmelCase:Union[str, Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,tie_word_embeddings=a__ ,**a__ ,)
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,a__) or len(self.rope_scaling) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F'got {self.rope_scaling}')
_lowerCAmelCase:Optional[Any] = self.rope_scaling.get('''type''' ,a__)
_lowerCAmelCase:Any = self.rope_scaling.get('''factor''' ,a__)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
if rope_scaling_factor is None or not isinstance(a__ ,a__) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
| 227
| 1
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class __snake_case (UpperCamelCase_ ):
def __init__( self : List[str] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 719
|
import numpy as np
def _UpperCAmelCase (UpperCamelCase_ : np.array ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 14
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
snake_case : Tuple = {'target_lang': 'fi', 'source_lang': 'en'}
snake_case : str = '>>zh<<'
snake_case : Optional[Any] = 'Helsinki-NLP/'
if is_torch_available():
snake_case : Optional[Any] = 'pt'
elif is_tf_available():
snake_case : Optional[int] = 'tf'
else:
snake_case : Optional[Any] = 'jax'
@require_sentencepiece
class lowerCamelCase__( snake_case_ , unittest.TestCase ):
UpperCamelCase : Any = MarianTokenizer
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Any = True
def __magic_name__ ( self ):
"""simple docstring"""
super().setUp()
__lowercase = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
__lowercase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
__lowercase = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , **__UpperCAmelCase ):
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = """</s>"""
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__UpperCAmelCase ) , 9 )
def __magic_name__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
__lowercase = en_de_tokenizer(["""I am a small frog"""] , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(__UpperCAmelCase , batch.input_ids[0] )
__lowercase = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__UpperCAmelCase )
__lowercase = [x.name for x in Path(__UpperCAmelCase ).glob("""*""" )]
self.assertIn("""source.spm""" , __UpperCAmelCase )
MarianTokenizer.from_pretrained(__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = tok(
["""I am a small frog""" * 1_0_0_0, """I am a small frog"""] , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.get_tokenizer()
__lowercase = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = {"""input_ids""": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
__lowercase = """Tämä on testi"""
__lowercase = """This is a test"""
__lowercase = [7_6, 7, 2_0_4_7, 2]
__lowercase = [6_9, 1_2, 1_1, 9_4_0, 2]
__lowercase = tokenizer(__UpperCAmelCase ).input_ids
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = tokenizer(text_target=__UpperCAmelCase ).input_ids
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 566
| 0
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__UpperCamelCase : Optional[Any] = HfArgumentParser(InitializationArguments)
__UpperCamelCase : str = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__UpperCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__UpperCamelCase : Optional[int] = {
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
__UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__UpperCamelCase : Optional[Any] = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 53
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 379
|
'''simple docstring'''
def lowerCAmelCase_ ( a : list , a : int , a : int = 0 , a : int = 0 ):
a__ = right or len(a ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a , a , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
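# Reference sketch of the search above, with invented names: the snippet checks both ends of
# the window and recurses inward, but the renamed identifiers break its recursive `search(...)`
# call. This version keeps the original's `right or len(data) - 1` default (which quietly
# re-expands the window if `right` ever reaches 0):
def two_ended_search(data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(data) - 1
    if left > right:
        return -1  # key is not present
    if data[left] == key:
        return left
    if data[right] == key:
        return right
    return two_ended_search(data, key, left + 1, right - 1)


print(two_ended_search([1, 3, 5, 7, 9], 7))  # prints 3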
| 394
| 0
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = len(_UpperCamelCase )
for _ in range(_UpperCamelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
A : List[Any] = list(range(1_0, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 282
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a=1 , __a=False , **__a ):
super().__init__(**__a )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_embed
__lowerCAmelCase = d_proj
__lowerCAmelCase = cutoffs + [vocab_size]
__lowerCAmelCase = [0] + self.cutoffs
__lowerCAmelCase = div_val
__lowerCAmelCase = self.cutoffs[0]
__lowerCAmelCase = len(self.cutoffs ) - 1
__lowerCAmelCase = self.shortlist_size + self.n_clusters
__lowerCAmelCase = keep_order
__lowerCAmelCase = []
__lowerCAmelCase = []
def snake_case ( self , __a ):
if self.n_clusters > 0:
__lowerCAmelCase = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__a , name="cluster_weight" )
__lowerCAmelCase = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=__a , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__lowerCAmelCase = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" , )
self.out_projs.append(__a )
else:
self.out_projs.append(__a )
__lowerCAmelCase = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
__lowerCAmelCase = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowerCAmelCase = self.d_embed // (self.div_val**i)
__lowerCAmelCase = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" )
self.out_projs.append(__a )
__lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
__lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(__a )
@staticmethod
def snake_case ( __a , __a , __a , __a=None ):
__lowerCAmelCase = x
if proj is not None:
__lowerCAmelCase = tf.einsum("ibd,ed->ibe" , __a , __a )
return tf.einsum("ibd,nd->ibn" , __a , __a ) + b
@staticmethod
def snake_case ( __a , __a ):
__lowerCAmelCase = shape_list(__a )
__lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype )
__lowerCAmelCase = tf.stack([r, target] , 1 )
return tf.gather_nd(__a , __a )
def snake_case ( self , __a , __a , __a=True , __a=False ):
__lowerCAmelCase = 0
if self.n_clusters == 0:
__lowerCAmelCase = self._logit(__a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__a , logits=__a )
__lowerCAmelCase = tf.nn.log_softmax(__a , axis=-1 )
else:
__lowerCAmelCase = shape_list(__a )
__lowerCAmelCase = []
__lowerCAmelCase = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__lowerCAmelCase = (target >= l_idx) & (target < r_idx)
__lowerCAmelCase = tf.where(__a )
__lowerCAmelCase = tf.boolean_mask(__a , __a ) - l_idx
if self.div_val == 1:
__lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx]
__lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx]
else:
__lowerCAmelCase = self.out_layers[i][0]
__lowerCAmelCase = self.out_layers[i][1]
if i == 0:
__lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
__lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
__lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[0] )
__lowerCAmelCase = tf.nn.log_softmax(__a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = self._gather_logprob(__a , __a )
else:
__lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[i] )
__lowerCAmelCase = tf.nn.log_softmax(__a )
__lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
__lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__a )
if target is not None:
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = self._gather_logprob(__a , __a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__a , -cur_logprob , shape_list(__a ) )
__lowerCAmelCase = tf.concat(__a , axis=-1 )
if target is not None:
if return_mean:
__lowerCAmelCase = tf.reduce_mean(__a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__a )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(__a , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 282
| 1
|
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_heads' ) )
class lowercase_ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : str=3 , _UpperCAmelCase : List[Any]=[16, 48, 96] , _UpperCAmelCase : Any=[1, 3, 6] , _UpperCAmelCase : Optional[int]=[1, 2, 10] , _UpperCAmelCase : Optional[Any]=[7, 3, 3] , _UpperCAmelCase : List[Any]=[4, 2, 2] , _UpperCAmelCase : Optional[Any]=[2, 1, 1] , _UpperCAmelCase : Optional[Any]=[2, 2, 2] , _UpperCAmelCase : Dict=[False, False, True] , _UpperCAmelCase : Optional[Any]=[0.0, 0.0, 0.0] , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Tuple=1E-1_2 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=2 , ):
_A = parent
_A = batch_size
_A = image_size
_A = patch_sizes
_A = patch_stride
_A = patch_padding
_A = is_training
_A = use_labels
_A = num_labels
_A = num_channels
_A = embed_dim
_A = num_heads
_A = stride_kv
_A = depth
_A = cls_token
_A = attention_drop_rate
_A = initializer_range
_A = layer_norm_eps
def lowerCAmelCase_ ( self : Dict ):
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : Optional[int] ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ):
_A = CvtModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase )
_A = (self.image_size, self.image_size)
_A , _A = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_A = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_A = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ):
_A = self.num_labels
_A = CvtForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_A = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Tuple ):
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCAmelCase : List[Any] = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : List[Any] = False
def lowerCAmelCase_ ( self : Any ):
_A = CvtModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : List[str] ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCAmelCase_ ( self : Dict ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
def check_hidden_states_output(_UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ):
_A = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_A = outputs.hidden_states
_A = len(self.model_tester.depth )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = CvtModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _snake_case ( ) -> List[Any]:
'''simple docstring'''
_A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : int ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase_ ( self : str ):
_A = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCAmelCase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCAmelCase )
# verify the logits
_A = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_A = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 7
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356
| 0
|
"""simple docstring"""
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
assert x is not None
assert y is not None
UpperCAmelCase : List[Any] = len(UpperCamelCase )
UpperCAmelCase : int = len(UpperCamelCase )
# declaring the array for storing the dp values
UpperCAmelCase : Dict = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
UpperCAmelCase : Optional[int] = 1 if x[i - 1] == y[j - 1] else 0
UpperCAmelCase : Any = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
UpperCAmelCase : Union[str, Any] = """"""
UpperCAmelCase , UpperCAmelCase : Tuple = m, n
while i > 0 and j > 0:
UpperCAmelCase : Tuple = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
UpperCAmelCase : Dict = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
A: str = "AGGTAB"
A: Any = "GXTXAYB"
A: int = 4
A: Optional[int] = "GTAB"
A , A: Dict = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
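# Reference sketch of the dynamic programme above with plain names (the renamed temporaries
# obscure which variables hold the traceback indices and the accumulated subsequence); same
# O(m*n) table, plus a walk back from dp[m][n] to recover one longest common subsequence:
def lcs(x: str, y: str) -> tuple[int, str]:
    m, n = len(x), len(y)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    seq, i, j = "", m, n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1
        else:
            j -= 1
    return dp[m][n], seq


assert lcs("AGGTAB", "GXTXAYB") == (4, "GTAB")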
| 359
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
A: Union[str, Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
A: int = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
A: Tuple = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : Tuple = len([g for position, g in enumerate(UpperCamelCase ) if g == main_target[position]] )
return (item, float(UpperCamelCase ))
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : List[str] = random.randint(0 , len(UpperCamelCase ) - 1 )
UpperCAmelCase : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
UpperCAmelCase : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _snake_case ( UpperCamelCase : str , UpperCamelCase : list[str] ):
UpperCAmelCase : str = list(UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
UpperCAmelCase : int = random.choice(UpperCamelCase )
return "".join(UpperCamelCase )
def _snake_case ( UpperCamelCase : tuple[str, float] , UpperCamelCase : list[tuple[str, float]] , UpperCamelCase : list[str] , ):
UpperCAmelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
UpperCAmelCase : Optional[Any] = int(parent_a[1] * 100 ) + 1
UpperCAmelCase : List[str] = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase ):
UpperCAmelCase : List[str] = population_score[random.randint(0 , UpperCamelCase )][0]
UpperCAmelCase , UpperCAmelCase : Any = crossover(parent_a[0] , UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase , UpperCamelCase ) )
pop.append(mutate(UpperCamelCase , UpperCamelCase ) )
return pop
def _snake_case ( UpperCamelCase : str , UpperCamelCase : list[str] , UpperCamelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
UpperCAmelCase : Dict = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(UpperCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
UpperCAmelCase : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
UpperCAmelCase : Optional[Any] = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(UpperCamelCase )
# Generate random starting population.
UpperCAmelCase : Optional[int] = []
for _ in range(UpperCamelCase ):
population.append("""""".join([random.choice(UpperCamelCase ) for i in range(len(UpperCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
UpperCAmelCase , UpperCAmelCase : Any = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
UpperCAmelCase : str = [evaluate(UpperCamelCase , UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
UpperCAmelCase : Union[str, Any] = sorted(UpperCamelCase , key=lambda UpperCamelCase : x[1] , reverse=UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
UpperCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(UpperCamelCase )
# Normalize population score to be between 0 and 1.
UpperCAmelCase : List[str] = [
(item, score / len(UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(UpperCamelCase ):
population.extend(select(population_score[int(UpperCamelCase )] , UpperCamelCase , UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
A: Union[str, Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
A: Dict = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
A , A , A: List[Any] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
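# The evolve loop above scores a candidate by positional matches against the target, crosses
# two parents at a random slice, and mutates single genes with probability MUTATION_PROBABILITY.
# A plain-named sketch of those three primitives, reconstructing the intent (the renamed
# assignment targets in the record drop the second parent and the in-place gene replacement):
import random


def evaluate_candidate(item: str, target: str) -> tuple[str, float]:
    return item, float(sum(g == target[i] for i, g in enumerate(item)))


def crossover_pair(parent_a: str, parent_b: str) -> tuple[str, str]:
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]


def mutate_candidate(item: str, genes: list[str], probability: float = 0.4) -> str:
    gene_list = list(item)
    if random.uniform(0, 1) < probability:
        gene_list[random.randint(0, len(gene_list) - 1)] = random.choice(genes)
    return "".join(gene_list)


print(evaluate_candidate("GATTO", "GATTA"))  # ('GATTO', 4.0)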
| 359
| 1
|
class lowerCAmelCase_ : # Public class to implement a graph
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :list[list[bool]] ):
UpperCamelCase__ :List[str] = row
UpperCamelCase__ :List[Any] = col
UpperCamelCase__ :Dict = graph
def __a ( self :Dict , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :list[list[bool]] ):
# Checking all 8 elements surrounding nth element
UpperCamelCase__ :Union[str, Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCamelCase__ :Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCamelCase__ :List[Any] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , lowerCamelCase__ )
def __a ( self :str ): # And finally, count all islands.
UpperCamelCase__ :List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCamelCase__ :str = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
count += 1
return count
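# In the record above all three methods are renamed to `__a`, so the later definitions shadow
# the earlier ones and the class can no longer be driven end to end. A self-contained sketch of
# the same 8-neighbour island count (invented names, iterative DFS instead of recursion):
def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] and not seen[r][c]:
                count += 1
                seen[r][c] = True
                stack = [(r, c)]
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count


assert count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]) == 2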
| 45
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : torch.FloatTensor
class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE__ : Tuple[int] = (64,) , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "silu" , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : int = 2_56 , SCREAMING_SNAKE_CASE__ : int = 32 , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : float = 0.1_82_15 , SCREAMING_SNAKE_CASE__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
lowerCamelCase__ = Encoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , down_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , double_z=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
lowerCamelCase__ = VectorQuantizer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , beta=0.25 , remap=SCREAMING_SNAKE_CASE__ , sane_index_shape=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
# pass init params to Decoder
lowerCamelCase__ = Decoder(
in_channels=SCREAMING_SNAKE_CASE__ , out_channels=SCREAMING_SNAKE_CASE__ , up_block_types=SCREAMING_SNAKE_CASE__ , block_out_channels=SCREAMING_SNAKE_CASE__ , layers_per_block=SCREAMING_SNAKE_CASE__ , act_fn=SCREAMING_SNAKE_CASE__ , norm_num_groups=SCREAMING_SNAKE_CASE__ , norm_type=SCREAMING_SNAKE_CASE__ , )
@apply_forward_hook
def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ):
lowerCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE__ )
@apply_forward_hook
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = self.quantize(SCREAMING_SNAKE_CASE__ )
else:
lowerCamelCase__ = h
lowerCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE__ , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : bool = True ):
lowerCamelCase__ = sample
lowerCamelCase__ = self.encode(SCREAMING_SNAKE_CASE__ ).latents
lowerCamelCase__ = self.decode(SCREAMING_SNAKE_CASE__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=SCREAMING_SNAKE_CASE__ )
| 659
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case = TypeVar("KEY")
_snake_case = TypeVar("VAL")
@dataclass(frozen=SCREAMING_SNAKE_CASE_ , slots=SCREAMING_SNAKE_CASE_ )
class _a ( Generic[KEY, VAL] ):
a_ : KEY
a_ : VAL
class _a ( _Item ):
def __init__( self : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __bool__( self : str ):
return False
_snake_case = _DeletedItem()
class _a ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ):
lowerCamelCase__ = initial_block_size
lowerCamelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ = capacity_factor
lowerCamelCase__ = 0
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY ):
return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets )
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : int ):
return (ind + 1) % len(self._buckets )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
lowerCamelCase__ = self._buckets[ind]
if not stored:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return True
else:
return False
def _UpperCamelCase ( self : Dict ):
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = self._buckets
lowerCamelCase__ = [None] * new_size
lowerCamelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _UpperCamelCase ( self : List[str] ):
self._resize(len(self._buckets ) * 2 )
def _UpperCamelCase ( self : Optional[int] ):
self._resize(len(self._buckets ) // 2 )
def _UpperCamelCase ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ):
lowerCamelCase__ = self._get_bucket_index(SCREAMING_SNAKE_CASE__ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ = self._get_next_ind(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
break
def __setitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __delitem__( self : Dict , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
raise KeyError(SCREAMING_SNAKE_CASE__ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , SCREAMING_SNAKE_CASE__ : KEY ):
for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ):
return self._len
def __iter__( self : Optional[int] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : str ):
lowerCamelCase__ = ' ,'.join(
F'{item.key}: {item.val}' for item in self._buckets if item )
return F'HashMap({val_string})'
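# Sketch of how the open-addressing map above is exercised, assuming the three classes keep
# distinct names as in the upstream source (e.g. _Item, _DeletedItem, HashMap); here they are
# all renamed to `_a`, so only the last definition survives under that name.
hm = HashMap(initial_block_size=8, capacity_factor=0.75)
hm["alpha"] = 1
hm["beta"] = 2
assert hm["alpha"] == 1 and len(hm) == 2
del hm["alpha"]               # marks the slot with the _deleted sentinel
assert list(hm) == ["beta"]   # __iter__ yields the remaining keys
print(hm)                     # __repr__ lists "key: val" pairs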
| 659
| 1
|
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Dict = k_size // 2
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase__ :List[Any] = 1 / (2 * pi * sigma) * exp(-(square(_SCREAMING_SNAKE_CASE ) + square(_SCREAMING_SNAKE_CASE )) / (2 * square(_SCREAMING_SNAKE_CASE )) )
return g
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase__ :Union[str, Any] = height - k_size + 1
lowerCAmelCase__ :Optional[Any] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase__ :Optional[int] = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase__ :Tuple = 0
for i, j in product(range(_SCREAMING_SNAKE_CASE ) , range(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :Union[str, Any] = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase__ :Optional[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase__ :str = gen_gaussian_kernel(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = ravel(_SCREAMING_SNAKE_CASE )
# reshape and get the dst image
lowerCAmelCase__ :Optional[Any] = dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
return dst
if __name__ == "__main__":
# read original image
__A = imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__A = gaussian_filter(gray, 3, sigma=1)
__A = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
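# The filter above builds a centred 2D Gaussian kernel and applies it via im2col. A small
# plain-named sketch of just the kernel generation, mirroring the snippet's formula
# (kernel size and sigma are arbitrary example values):
import numpy as np


def gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    return 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma)))


print(gaussian_kernel(3, sigma=1).round(3))  # 3x3 weights, largest at the centre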
| 93
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _A ):
__A : Any = 3
__A : Union[str, Any] = 250
__A : int = ids_tensor((batch_size, length) , _A )
__A : List[Any] = torch.ones((batch_size, length) , device=_A , dtype=torch.float ) / length
return input_ids, scores
def UpperCAmelCase_ ( self ):
__A , __A : Tuple = self._get_tensors(5 )
__A : Optional[Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_A , _A ) )
__A , __A : List[str] = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__A , __A : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : List[str] = MaxLengthCriteria(max_length=10 )
__A , __A : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
__A , __A : str = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__A , __A : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__A , __A : Optional[int] = self._get_tensors(5 )
self.assertFalse(criteria(_A , _A ) )
__A , __A : str = self._get_tensors(9 )
self.assertFalse(criteria(_A , _A ) )
__A , __A : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(_A , _A ) )
__A : Tuple = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCAmelCase_ ( self ):
__A , __A : Any = self._get_tensors(5 )
__A : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_A , _A ) )
__A : Optional[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_A , _A ) )
def UpperCAmelCase_ ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__A : Dict = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_A ) , 1 )
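# The tests above drive each criterion with dummy (input_ids, scores) pairs. A compact sketch
# of the same calls outside the test harness (shapes are arbitrary; the exact return type of
# StoppingCriteriaList.__call__ varies across transformers versions):
import torch
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
input_ids = torch.ones((3, 9), dtype=torch.long)  # batch of 3 sequences, current length 9
scores = torch.ones((3, 9)) / 9
print(criteria(input_ids, scores))  # falsy: length 9 < max_length 10, so generation continues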
| 239
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
A__ = StableDiffusionInpaintPipeline
A__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ = frozenset([] )
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCAmelCase , )
lowerCamelCase__ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__ = CLIPTextModel(_lowerCAmelCase )
lowerCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __magic_name__ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
lowerCamelCase__ = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(_lowerCAmelCase )
else:
lowerCamelCase__ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCamelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self ):
lowerCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = StableDiffusionInpaintPipeline(**_lowerCAmelCase )
lowerCamelCase__ = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCamelCase__ = self.get_dummy_inputs(_lowerCAmelCase )
lowerCamelCase__ = sd_pipe(**_lowerCAmelCase ).images
lowerCamelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ):
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
lowerCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __magic_name__ ( self ):
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
lowerCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase , torch_dtype=torch.floataa , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __magic_name__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowerCamelCase__ = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase__ = PNDMScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
lowerCamelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , scheduler=_lowerCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type="np" , )
lowerCamelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 360
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A_ = get_logger(__name__)
A_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase ):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
@add_start_docstrings(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
for processor in self:
lowerCamelCase__ = inspect.signature(processor.__call__ ).parameters
if len(_lowerCAmelCase ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F"Make sure that all the required parameters: {list(function_args.keys() )} for "
F"{processor.__class__} are passed to the logits processor." )
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
else:
lowerCamelCase__ = processor(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not (temperature > 0):
raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}" )
lowerCamelCase__ = temperature
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or (min_tokens_to_keep < 1):
raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
lowerCamelCase__ = top_p
lowerCamelCase__ = filter_value
lowerCamelCase__ = min_tokens_to_keep
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , scores.shape[-1] )
lowerCamelCase__ = jnp.full_like(_lowerCAmelCase , self.filter_value )
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase , axis=-1 ).cumsum(axis=-1 )
lowerCamelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCamelCase__ = jnp.roll(_lowerCAmelCase , 1 )
score_mask |= score_mask.at[:, 0].set(_lowerCAmelCase )
# min tokens to keep
lowerCamelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(_lowerCAmelCase )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jax.lax.sort_key_val(_lowerCAmelCase , _lowerCAmelCase )[-1]
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = -float("Inf" ) , _lowerCAmelCase = 1 ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or top_k <= 0:
raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}" )
lowerCamelCase__ = max(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = filter_value
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ = scores.shape
lowerCamelCase__ = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCamelCase__ = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCamelCase__ , lowerCamelCase__ = lax.top_k(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.broadcast_to((jnp.arange(_lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCamelCase__ = topk_scores.flatten()
lowerCamelCase__ = topk_indices.flatten() + shift
lowerCamelCase__ = next_scores_flat.at[topk_indices_flat].set(_lowerCAmelCase )
lowerCamelCase__ = next_scores_flat.reshape(_lowerCAmelCase , _lowerCAmelCase )
return next_scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = bos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = max_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or min_length < 0:
raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or eos_token_id < 0:
raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
lowerCamelCase__ = min_length
lowerCamelCase__ = eos_token_id
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# create boolean flag to decide if min length penalty should be applied
lowerCamelCase__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = begin_index
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCamelCase__ = jnp.where(_lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , _lowerCAmelCase )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = list(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase ):
lowerCamelCase__ = dict(_lowerCAmelCase )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCamelCase__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCamelCase__ = force_token_array.at[index].set(_lowerCAmelCase )
lowerCamelCase__ = jnp.intaa(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
def _force_token(_lowerCAmelCase ):
lowerCamelCase__ = scores.shape[0]
lowerCamelCase__ = self.force_token_array[generation_idx]
lowerCamelCase__ = jnp.ones_like(_lowerCAmelCase , dtype=scores.dtype ) * -float("inf" )
lowerCamelCase__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCamelCase__ = lax.dynamic_update_slice(_lowerCAmelCase , _lowerCAmelCase , (0, current_token) )
return new_scores
lowerCamelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_lowerCAmelCase ) , lambda: scores , ) , )
return scores
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = generate_config.eos_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id
lowerCamelCase__ = generate_config.no_timestamps_token_id + 1
lowerCamelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_lowerCAmelCase , "max_initial_timestamp_index" ):
lowerCamelCase__ = generate_config.max_initial_timestamp_index
else:
lowerCamelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCamelCase__ = model_config.vocab_size
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCamelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) >= 1 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = jnp.where((cur_len - self.begin_index) < 2 , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _lowerCAmelCase , _lowerCAmelCase , )
return jnp.where(
_lowerCAmelCase , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(cur_len == self.begin_index , _lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _lowerCAmelCase , )
lowerCamelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
lowerCamelCase__ = jnp.where(
_lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , _lowerCAmelCase , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCamelCase__ = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
def handle_cumulative_probs(_lowerCAmelCase , _lowerCAmelCase ):
lowerCamelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCamelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , _lowerCAmelCase , )
lowerCamelCase__ = jax.vmap(_lowerCAmelCase )(_lowerCAmelCase , _lowerCAmelCase )
return scores
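# Illustrative sketch (separate from the processors above): every processor in this file maps
# (input_ids, scores, cur_len) to a new scores array, so a sampling strategy is built by chaining
# such transforms. Below is a tiny NumPy stand-in for the temperature and top-k steps, written
# only to show the pattern; the `_toy_*` names are not part of the library.
import numpy as np
def _toy_temperature(scores, temperature):
    # Dividing logits by the temperature flattens (T > 1) or sharpens (T < 1) the distribution.
    return scores / temperature
def _toy_top_k(scores, top_k, filter_value=-float("inf")):
    # Mask every logit below the k-th largest in each row down to filter_value.
    kth_best = np.sort(scores, axis=-1)[:, -top_k][:, None]
    return np.where(scores < kth_best, filter_value, scores)
if __name__ == "__main__":
    toy_logits = np.array([[1.0, 3.0, 2.0, 0.5]])
    print(_toy_top_k(_toy_temperature(toy_logits, temperature=0.7), top_k=2))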
| 360
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = ['''pixel_values''']
def __init__( self : Any , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : bool = True , lowerCamelCase : Union[int, float] = 1 / 255 , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = size if size is not None else {"""height""": 384, """width""": 384}
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase = do_convert_rgb
def lowerCamelCase ( self : Any , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : str , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_UpperCAmelCase = (size["""height"""], size["""width"""])
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Union[int, float] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Dict , lowerCamelCase : ImageInput , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Dict[str, int]] = None , lowerCamelCase : PILImageResampling = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[float] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : bool = None , lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase : List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_UpperCAmelCase = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_UpperCAmelCase = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowerCamelCase )
return encoded_outputs
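# Illustrative sketch (separate from the image processor class above): the numerical core of
# `preprocess` is rescaling pixel values by 1/255 followed by per-channel normalization with
# (image - mean) / std. The constants below are the usual OPENAI_CLIP_MEAN / OPENAI_CLIP_STD
# values imported at the top of the file; the random image is only a stand-in for real input.
import numpy as np
if __name__ == "__main__":
    fake_image = np.random.randint(0, 256, size=(384, 384, 3)).astype(np.float32)
    rescaled = fake_image * (1 / 255)
    clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
    clip_std = np.array([0.26862954, 0.26130258, 0.27577711])
    normalized = (rescaled - clip_mean) / clip_std
    print(normalized.shape, normalized.min(), normalized.max())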
| 108
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 )
| 30
| 0
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
A_ : Optional[Any] = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 696
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A_ : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=1 ):
'''simple docstring'''
lowerCamelCase__ : Any = tokenizer
lowerCamelCase__ : Optional[Any] = dataset
lowerCamelCase__ : int = len(lowerCamelCase_ ) if n_tasks is None else n_tasks
lowerCamelCase__ : Any = n_copies
def __iter__(self ):
'''simple docstring'''
lowerCamelCase__ : Dict = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ : Optional[int] = self.tokenizer(lowerCamelCase_, padding=lowerCamelCase_, return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = start_length
lowerCamelCase__ : List[str] = eof_strings
lowerCamelCase__ : List[str] = tokenizer
def __call__(self, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ : Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase_ )
def remove_last_block ( string ):
    string_list = re.split('(%s)' % '|'.join(A_ ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=20 , **_lowerCamelCase ):
lowerCamelCase__ : List[str] = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
lowerCamelCase__ : str = batch['ids'].shape[-1]
lowerCamelCase__ : int = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
lowerCamelCase__ : Optional[Any] = batch['task_id'].repeat(_lowerCamelCase )
lowerCamelCase__ : List[Any] = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase__ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
lowerCamelCase__ : str = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ : Optional[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def lowerCamelCase_ ( ):
# Setup configuration
lowerCamelCase__ : int = HfArgumentParser(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ : Tuple = 'false'
if args.num_workers is None:
lowerCamelCase__ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ : Optional[int] = tokenizer.eos_token
lowerCamelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase__ : Any = load_dataset('openai_humaneval' )
lowerCamelCase__ : Optional[int] = load_metric('code_eval' )
lowerCamelCase__ : List[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase__ : Tuple = TokenizedDataset(_lowerCamelCase , human_eval['test'] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ : Union[str, Any] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ : List[Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ : str = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
lowerCamelCase__ : List[str] = []
for task in tqdm(range(_lowerCamelCase ) ):
lowerCamelCase__ : int = human_eval['test'][task]['test']
lowerCamelCase__ : Union[str, Any] = f'''check({human_eval['test'][task]['entry_point']})'''
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ : Any = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 696
| 1
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle ( data : list ) -> Any:
    """Shuffle a list in place by repeatedly swapping two randomly chosen positions."""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 161
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm ( main_process_only = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index == 0
    return _tqdm(*args , **kwargs , disable=disable )
| 590
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch ( gpt2_checkpoint_path : str , gpt2_config_file : str , pytorch_dump_folder_path : str ):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 714
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class a__ ( UpperCamelCase_ ):
def __init__( self : Dict ,*a__ : List[str] ,**a__ : str) -> None:
"""simple docstring"""
warnings.warn(
'''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PoolFormerImageProcessor instead.''' ,a__ ,)
super().__init__(*a__ ,**a__)
| 439
| 0
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
lowerCAmelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type ( model_name_or_path ):
    """Infer the model family (rag_token, rag_sequence or bart) from the model name or path."""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths ( metric_fn , prediction , ground_truths ):
    """Return the best metric value of the prediction against any of the ground truths."""
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores ( args , preds_path , gold_data_path ):
    """Compute exact-match and F1 scores of the predictions against the gold answers."""
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        f1 += metric_max_over_ground_truths(f1_score , prediction , ground_truths )
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'''F1: {f1:.2f}''' )
    logger.info(f'''EM: {em:.2f}''' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]

    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval( args , rag_model , questions ):
    """simple docstring"""

    def strip_title( title ):
        if title.startswith('"' ):
            title = title[1:]
        if title.endswith('"' ):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="pt" , padding=True , truncation=True , )["input_ids"].to(args.device )

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
    return provenance_strings
def evaluate_batch_e2e( args , rag_model , questions ):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="pt" , padding=True , truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )

        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("Q: {} - A: {}".format(q , a ) )

        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=str , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=None , choices=["exact", "compressed", "legacy"] , type=str , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=None , type=str , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=int , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=str , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=int , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=None , type=str , required=True , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=None , type=str , required=True , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=str , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=str , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=int , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=int , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=int , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=50 , type=int , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retrieved while generating." , )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def main( args ):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , checkpoints )
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info("  Batch size = %d" , args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
main(args)
| 60
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "deit"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
| 558
| 0
|
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config( mixed_precision="no" , save_location: str = default_json_config_file , use_xpu: bool = False ):
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
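# For example (assuming defaults), write_basic_config("fp16") writes a default_config.yaml under the
# Accelerate cache location described in the --config_file help text below.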
def default_command_parser( parser , parents ):
    parser = parser.add_parser("default" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
        "--config_file" , default=None , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
        "--mixed_precision" , choices=["no", "fp16", "bf16"] , type=str , help="Whether or not to use mixed precision training. "
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=default_config_command )
return parser
def default_config_command( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class MaskFormerSwinConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 33
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="base" , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = PLBartTokenizer(__lowerCAmelCase , language_codes='''base''' , keep_accents=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_lowerCamelCase : Dict = tokenizer.vocab_size
_lowerCamelCase : Union[str, Any] = [tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) for x in range(end - 4 , __lowerCAmelCase )]
self.assertListEqual(__lowerCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
_lowerCamelCase : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_lowerCamelCase : Any = tokenizer(__lowerCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) , __lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = PLBartTokenizer(__lowerCAmelCase , language_codes='''multi''' , keep_accents=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowerCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_lowerCamelCase : Tuple = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowerCamelCase : Optional[int] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
_lowerCamelCase : str = tokenizer.vocab_size
_lowerCamelCase : Any = [tokenizer.convert_ids_to_tokens(__lowerCAmelCase ) for x in range(end - 7 , __lowerCAmelCase )]
self.assertListEqual(
__lowerCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
_lowerCamelCase : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
_lowerCamelCase : Optional[Any] = tokenizer(__lowerCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) , __lowerCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
    tgt_text = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        cls.pad_token_id = 1
        return cls
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0_0_0_3 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
_lowerCamelCase : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
_lowerCamelCase : str = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : int = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 2_0]
self.assertIsInstance(src_text[0] , __lowerCAmelCase )
_lowerCamelCase : List[str] = 1_0
_lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0_0_0_4, 5_0_0_0_1] )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[str] = tempfile.mkdtemp()
_lowerCamelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = PLBartTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors='''pt''' )
_lowerCamelCase : int = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __lowerCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_lowerCamelCase : Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
_lowerCamelCase : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors='''pt''' )
_lowerCamelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=1_0 , return_tensors='''pt''' )
_lowerCamelCase : Tuple = targets['''input_ids''']
_lowerCamelCase : Union[str, Any] = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0_0_0_1,
} , )
| 83
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = cached_file(__lowerCAmelCase , __lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : Optional[int] = f.read()
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
self.assertTrue(os.path.isfile(__lowerCAmelCase ) )
# File is cached at the same place the second time.
_lowerCamelCase : Tuple = cached_file(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''9b8c223''' )
self.assertEqual(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''snapshots''' , __lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
_lowerCamelCase : Optional[int] = cached_file('''tiny-random-bert''' , __lowerCAmelCase )
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
_lowerCamelCase : str = cached_file(__lowerCAmelCase , __lowerCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : int = cached_file(__lowerCAmelCase , '''conf''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(__lowerCAmelCase , '''does not appear to have a file named''' ):
_lowerCamelCase : Dict = cached_file(__lowerCAmelCase , '''conf''' )
with open(os.path.join(__lowerCAmelCase , '''refs''' , '''main''' ) ) as f:
_lowerCamelCase : List[Any] = f.read()
self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , '''.no_exist''' , __lowerCAmelCase , '''conf''' ) ) )
_lowerCamelCase : str = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = cached_file(__lowerCAmelCase , '''conf''' , local_files_only=__lowerCAmelCase , _raise_exceptions_for_missing_entries=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
_lowerCamelCase : Any = mock.Mock()
_lowerCamelCase : Optional[Any] = 5_0_0
_lowerCamelCase : Dict = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__lowerCAmelCase ) as mock_head:
_lowerCamelCase : Union[str, Any] = cached_file(__lowerCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=__lowerCAmelCase )
self.assertIsNone(__lowerCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__lowerCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase , revision='''ahaha''' )
_lowerCamelCase : Dict = get_file_from_repo('''bert-base-cased''' , __lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase : Dict = json.loads(open(__lowerCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Any = Path(__lowerCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__lowerCAmelCase , '''a.txt''' ) , str(__lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(__lowerCAmelCase , '''b.txt''' ) )
| 83
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : str = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "biogpt"

    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 710
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    '''simple docstring'''

    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None


class FuncNonContiguousArgs:
    '''simple docstring'''

    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
    '''simple docstring'''

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , "tf" , 12 , **__a )
@require_torch
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a , "pt" , 12 , **__a )
@require_torch
@slow
def snake_case ( self ):
from transformers import BertModel
__lowerCAmelCase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__a ) )
vocab_file.flush()
__lowerCAmelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__lowerCAmelCase = BertModel(BertConfig(vocab_size=len(__a ) ) )
model.save_pretrained(__a )
self._test_export(__a , "pt" , 12 , __a )
@require_tf
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowerCAmelCase = self._test_export(__a , "tf" , 12 , **__a )
__lowerCAmelCase = quantize(Path(__a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def snake_case ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__lowerCAmelCase = self._test_export(__a , "pt" , 12 , **__a )
__lowerCAmelCase = quantize(__a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def snake_case ( self , __a , __a , __a , __a=None , **__a ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
__lowerCAmelCase = Path(__a ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a , __a , __a , __a , __a , **__a )
return path
except Exception as e:
self.fail(__a )
@require_torch
@require_tokenizers
@slow
def snake_case ( self ):
from transformers import BertModel
__lowerCAmelCase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__lowerCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__a , __a , "pt" )
@require_tf
@require_tokenizers
@slow
def snake_case ( self ):
from transformers import TFBertModel
__lowerCAmelCase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__lowerCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__a , __a , "tf" )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = FeatureExtractionPipeline(__a , __a )
__lowerCAmelCase = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = infer_shapes(__a , __a )
# Assert all variables are present
self.assertEqual(len(__a ) , len(__a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __a )
self.assertSequenceEqual(variable_names[3:] , __a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def snake_case ( self ):
__lowerCAmelCase = ["input_ids", "attention_mask", "token_type_ids"]
__lowerCAmelCase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
__lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncContiguousArgs() , __a , __a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__a ) , set(__a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__lowerCAmelCase , __lowerCAmelCase = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a ) , 1 )
self.assertEqual(len(__a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def snake_case ( self ):
__lowerCAmelCase = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 282
| 0
|
"""simple docstring"""
def power( base: int , exponent: int ) -> int:
    return base * power(base , exponent - 1 ) if exponent else 1
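# For example, power(2, 5) == 32; negative exponents are handled by the caller below.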
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
A_ = int(input("""Enter the base: """).strip())
A_ = int(input("""Enter the exponent: """).strip())
A_ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
A_ = 1 / result
print(f"{base} to the power of {exponent} is {result}")
| 29
|
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    '''simple docstring'''

    _backends = ["transformers", "torch", "note_seq"]
def __init__( self : List[Any] , *lowercase : List[Any] , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[str] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
| 686
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
    def dummy_unet_condition( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=1_0 , )
return model
@property
    def dummy_vqvae_and_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        unet = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
    def test_audio_diffusion( self ):
'''simple docstring'''
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCamelCase = DDPMScheduler()
UpperCamelCase = AudioDiffusionPipeline(vqvae=lowerCamelCase__ , unet=self.dummy_unet , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=lowerCamelCase__ , steps=4 )
UpperCamelCase = output.audios[0]
UpperCamelCase = output.images[0]
UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=lowerCamelCase__ , steps=4 , return_dict=lowerCamelCase__ )
UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCamelCase = DDIMScheduler()
UpperCamelCase = self.dummy_vqvae_and_unet
UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
UpperCamelCase = pipe(raw_audio=lowerCamelCase__ , generator=lowerCamelCase__ , start_step=5 , steps=1_0 )
UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase = self.dummy_unet_condition
UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase__ , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
UpperCamelCase = torch.rand((1, 1, 1_0) )
UpperCamelCase = pipe(generator=lowerCamelCase__ , encoding=lowerCamelCase__ )
UpperCamelCase = output.images[0]
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion( self ):
'''simple docstring'''
UpperCamelCase = torch_device
UpperCamelCase = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=lowerCamelCase__ )
UpperCamelCase = output.audios[0]
UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 350
|
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
snake_case_ : Tuple = logging.get_logger(__name__)
def normalize_box( box , width , height ):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
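# For example, a box [10, 20, 30, 40] on a 100x200 (width x height) image normalizes to [100, 100, 300, 200].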
def apply_tesseract( image: np.ndarray , lang: Optional[str] , tesseract_config: Optional[str] ):
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words , left , top , width , height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = ['''pixel_values''']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 2_5_5 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = "" , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
UpperCamelCase = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCamelCase = get_size_dict(lowerCamelCase__ )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_value
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCamelCase = apply_ocr
UpperCamelCase = ocr_lang
UpperCamelCase = tesseract_config
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BILINEAR , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase = (size['''height'''], size['''width'''])
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ):
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowerCamelCase__ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
UpperCamelCase = []
UpperCamelCase = []
for image in images:
UpperCamelCase , UpperCamelCase = apply_tesseract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
UpperCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
UpperCamelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCamelCase__ )
if apply_ocr:
UpperCamelCase = words_batch
UpperCamelCase = boxes_batch
return data
| 350
| 1
|
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of all natural numbers below ``limit`` that are multiples of 3 or 5."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 3
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowercase ( A, unittest.TestCase ):
'''simple docstring'''
_A : Dict = RoCBertTokenizer
_A : Optional[Any] = None
_A : Dict = False
_A : List[str] = True
_A : Optional[int] = filter_non_english
def A_ ( self : int ):
super().setUp()
UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for i, value in enumerate(_a ):
UpperCamelCase__ = i
UpperCamelCase__ = i
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_a , _a , ensure_ascii=_a )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_a , _a , ensure_ascii=_a )
def A_ ( self : List[str] ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase__ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_a , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_a ) , [5, 6, 2, 5, 7, 8] )
def A_ ( self : Tuple ):
UpperCamelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A_ ( self : Optional[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A_ ( self : List[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : Any ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A_ ( self : str ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : List[Any] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : Any ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = RoCBertBasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A_ ( self : Optional[int] ):
UpperCamelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
UpperCamelCase__ = {}
for i, token in enumerate(_a ):
UpperCamelCase__ = i
UpperCamelCase__ = RoCBertWordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A_ ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A_ ( self : Optional[Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A_ ( self : List[str] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A_ ( self : Union[str, Any] ):
UpperCamelCase__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
UpperCamelCase__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def A_ ( self : Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCamelCase__ = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
UpperCamelCase__ = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
UpperCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def A_ ( self : Any ):
UpperCamelCase__ = ['''的''', '''人''', '''有''']
UpperCamelCase__ = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = True
UpperCamelCase__ = self.tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = tokenizer_p.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(_a )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
UpperCamelCase__ = False
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(_a , **_a )
UpperCamelCase__ = tokenizer_r.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_p.encode(_a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(_a )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase__ = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
@slow
def A_ ( self : str ):
UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase__ = tokenizer.encode('''你好''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode('''你是谁''' , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A_ ( self : List[Any] ):
UpperCamelCase__ = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
UpperCamelCase__ = '''你好,你是谁'''
UpperCamelCase__ = tokenizer.tokenize(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_shape_ids(_a )
UpperCamelCase__ = tokenizer.convert_tokens_to_pronunciation_ids(_a )
UpperCamelCase__ = tokenizer.prepare_for_model(
_a , _a , _a , add_special_tokens=_a )
UpperCamelCase__ = tokenizer.encode_plus(_a , add_special_tokens=_a )
self.assertEqual(_a , _a )
| 240
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 31
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]:
'''simple docstring'''
if subparsers is not None:
a : Dict = subparsers.add_parser("test" )
else:
a : Tuple = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=_lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str:
'''simple docstring'''
a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
a : int = script_name
else:
a : int = F"""--config_file={args.config_file} {script_name}"""
a : Optional[int] = ["accelerate-launch"] + test_args.split()
a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Any = test_command_parser()
a : Union[str, Any] = parser.parse_args()
test_command(_lowercase )
if __name__ == "__main__":
main()
| 31
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase_ ( ) -> Any:
a__ : str = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=__a )
a__ : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
a__ : List[Any] = parser.parse_args()
if not hasattr(__a , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
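
# Illustrative shell usage of the subcommands registered above (script name and
# arguments are placeholders):
#
#   accelerate config                        # interactively create a config file
#   accelerate env                           # print environment information
#   accelerate test                          # smoke-test the saved config
#   accelerate launch my_script.py --arg value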
| 37
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A__ : Optional[Any] = None
A__ : str = logging.get_logger(__name__)
A__ : Optional[int] = '▁'
A__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A__ : Dict = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A__ : int = {
'google/pegasus-xsum': 5_12,
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PegasusTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict, lowerCamelCase : str=None, lowerCamelCase : List[str]=None, lowerCamelCase : int="<pad>", lowerCamelCase : str="</s>", lowerCamelCase : Optional[Any]="<unk>", lowerCamelCase : Any="<mask_2>", lowerCamelCase : Optional[Any]="<mask_1>", lowerCamelCase : int=None, lowerCamelCase : Dict=103, **lowerCamelCase : List[Any], ):
'''simple docstring'''
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError(
F"""additional_special_tokens should be of type {type(lowerCamelCase )}, but is"""
F""" {type(lowerCamelCase )}""" )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(lowerCamelCase ), self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2, self.offset )]
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, pad_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowercase__ ( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : Optional[Any], lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file, lowerCamelCase )
return (out_vocab_file,)
| 183
| 0
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def a ( __a , __a ) -> Any:
'''simple docstring'''
try:
with open(__a , '''rb''' ) as flax_state_f:
UpperCamelCase__ :Any = from_bytes(__a , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__a ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f'''Unable to convert {model_file} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__a , __a )
def a ( __a , __a ) -> Dict:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
UpperCamelCase__ :str = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values()
if any(__a ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCamelCase__ :List[str] = jax.tree_util.tree_map(
lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a )
UpperCamelCase__ :Optional[Any] = ''''''
UpperCamelCase__ :Tuple = flatten_dict(__a , sep='''.''' )
UpperCamelCase__ :Dict = pt_model.state_dict()
# keep track of unexpected & missing keys
UpperCamelCase__ :Any = []
UpperCamelCase__ :List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ :Union[str, Any] = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
UpperCamelCase__ :Dict = flax_key_tuple_array[:-1] + ['''weight''']
UpperCamelCase__ :List[Any] = jnp.transpose(__a , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
UpperCamelCase__ :int = flax_key_tuple_array[:-1] + ['''weight''']
UpperCamelCase__ :int = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
UpperCamelCase__ :Dict = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__a ):
UpperCamelCase__ :List[str] = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
UpperCamelCase__ :List[Any] = '''.'''.join(__a )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase__ :List[Any] = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor
UpperCamelCase__ :Optional[Any] = torch.from_numpy(__a )
# remove from missing keys
missing_keys.remove(__a )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__a )
pt_model.load_state_dict(__a )
# re-transform missing_keys to list
UpperCamelCase__ :int = list(__a )
if len(__a ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(__a ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
return pt_model
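

# Shape bookkeeping behind the four-dimensional "kernel" branch above, shown on dummy
# data: Flax convolution kernels are stored as (kh, kw, in_channels, out_channels),
# while PyTorch Conv2d weights are (out_channels, in_channels, kh, kw), hence the
# (3, 2, 0, 1) transpose. Illustrative sketch only.
if __name__ == "__main__":
    _dummy_flax_kernel = np.zeros((3, 3, 16, 32))  # (kh, kw, in, out)
    _dummy_pt_weight = jnp.transpose(_dummy_flax_kernel, (3, 2, 0, 1))
    assert _dummy_pt_weight.shape == (32, 16, 3, 3)  # (out, in, kh, kw)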
| 280
|
'''simple docstring'''
import torch
from torch import nn
class lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1 , UpperCamelCase_=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ :Dict = n_token
UpperCamelCase__ :List[Any] = d_embed
UpperCamelCase__ :Dict = d_proj
UpperCamelCase__ :Dict = cutoffs + [n_token]
UpperCamelCase__ :Union[str, Any] = [0] + self.cutoffs
UpperCamelCase__ :Any = div_val
UpperCamelCase__ :int = self.cutoffs[0]
UpperCamelCase__ :List[Any] = len(self.cutoffs ) - 1
UpperCamelCase__ :List[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCamelCase__ :Any = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCamelCase__ :Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCamelCase__ :Union[str, Any] = nn.ModuleList()
UpperCamelCase__ :str = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
else:
self.out_projs.append(UpperCamelCase_ )
self.out_layers.append(nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase_ , UpperCamelCase_ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase_ , r_idx - l_idx ) )
UpperCamelCase__ :Tuple = keep_order
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if proj is None:
UpperCamelCase__ :List[str] = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCamelCase__ :Any = nn.functional.linear(UpperCamelCase_ , proj.t().contiguous() )
UpperCamelCase__ :Union[str, Any] = nn.functional.linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase__ :Optional[Any] = hidden[..., :-1, :].contiguous()
UpperCamelCase__ :Optional[Any] = labels[..., 1:].contiguous()
UpperCamelCase__ :Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
UpperCamelCase__ :str = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
UpperCamelCase__ :int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase__ :Optional[int] = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCamelCase__ :int = labels != -100
UpperCamelCase__ :List[Any] = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :str = (
-nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase__ :Dict = nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :Dict = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :str = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :Optional[Any] = self.out_layers[i].weight
UpperCamelCase__ :Optional[int] = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :str = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :str = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
if labels is None:
UpperCamelCase__ :Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase__ :Any = torch.zeros_like(UpperCamelCase_ , dtype=hidden.dtype , device=hidden.device )
UpperCamelCase__ :Any = 0
UpperCamelCase__ :str = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase__ :Any = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase__ :int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase__ :Tuple = labels.index_select(0 , UpperCamelCase_ ) - l_idx
UpperCamelCase__ :str = head_logprob.index_select(0 , UpperCamelCase_ )
UpperCamelCase__ :int = hidden.index_select(0 , UpperCamelCase_ )
else:
UpperCamelCase__ :Dict = hidden
if i == 0:
if labels is not None:
UpperCamelCase__ :Any = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :List[str] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :str = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :Any = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase__ :List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ :List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase__ :Dict = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
if self.n_clusters == 0:
UpperCamelCase__ :Optional[Any] = self._compute_logit(UpperCamelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase_ , dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ :List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ :Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ :str = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ :List[Any] = self.out_layers[i].weight
UpperCamelCase__ :List[Any] = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ :Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCamelCase__ :Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase_ )
biases.append(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ :Optional[int] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase__ :Union[str, Any] = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :int = [0] + self.cutoffs
for i in range(len(UpperCamelCase_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase__ :str = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ :List[str] = self._compute_logit(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ :Tuple = nn.functional.log_softmax(UpperCamelCase_ , dim=1 )
UpperCamelCase__ :Optional[Any] = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase__ :Union[str, Any] = logprob_i
return out
| 280
| 1
|
def lowercase(input_str: str) -> str:
    """Return ``input_str`` with its words in reverse order.

    >>> lowercase("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 477
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class snake_case ( enum.Enum ):
'''simple docstring'''
snake_case_ : Any = 0
snake_case_ : Tuple = 1
snake_case_ : int = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_snake_case : int = None
if self.model.config.prefix is not None:
_snake_case : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_snake_case : List[str] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_snake_case , _snake_case , _snake_case : Optional[int] = self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params)
_snake_case : Dict = {**self._preprocess_params, **preprocess_params}
_snake_case : Optional[int] = {**self._forward_params, **forward_params}
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : int=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : str , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : List[Any] = {}
if prefix is not None:
_snake_case : Tuple = prefix
if prefix:
_snake_case : Optional[int] = self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Any = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""")
_snake_case : Tuple = handle_long_generation
preprocess_params.update(lowerCAmelCase)
_snake_case : str = generate_kwargs
_snake_case : Optional[int] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
_snake_case : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
_snake_case : Tuple = ReturnType.TENSORS
if return_type is not None:
_snake_case : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
_snake_case : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case : str = self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
if len(lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
_snake_case : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ ( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase)
def __call__( self : Tuple , lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
return super().__call__(lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : str="" , lowerCAmelCase : Any=None , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
_snake_case : Dict = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework)
_snake_case : Tuple = prompt_text
if handle_long_generation == "hole":
_snake_case : int = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
_snake_case : Optional[int] = generate_kwargs["""max_new_tokens"""]
else:
_snake_case : Dict = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
_snake_case : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
_snake_case : List[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
_snake_case : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def UpperCamelCase_ ( self : int , lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Dict = model_inputs["""input_ids"""]
_snake_case : List[Any] = model_inputs.get("""attention_mask""" , lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Any = 1
else:
_snake_case : List[Any] = input_ids.shape[0]
_snake_case : Tuple = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_snake_case : Any = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
_snake_case : Dict = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
_snake_case : Optional[int] = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_snake_case : str = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_snake_case : Optional[int] = self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
_snake_case : List[Any] = generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
_snake_case : Dict = tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]=ReturnType.FULL_TEXT , lowerCAmelCase : Union[str, Any]=True) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = model_outputs["""generated_sequence"""][0]
_snake_case : List[str] = model_outputs["""input_ids"""]
_snake_case : Optional[Any] = model_outputs["""prompt_text"""]
_snake_case : str = generated_sequence.numpy().tolist()
_snake_case : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_snake_case : Union[str, Any] = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_snake_case : int = self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_snake_case : str = 0
else:
_snake_case : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
_snake_case : Any = prompt_text + text[prompt_length:]
else:
_snake_case : Union[str, Any] = text[prompt_length:]
_snake_case : List[str] = {"""generated_text""": all_text}
records.append(lowerCAmelCase)
return records
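

# Illustrative usage of this pipeline through the public `transformers` API (assumes a
# causal LM checkpoint such as `gpt2` is available locally or can be downloaded):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
#     print(outputs[0]["generated_text"])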
| 477
| 1
|
"""simple docstring"""
def __a(number: int) -> int:
    """Count the set bits (1-bits) in the binary representation of ``number``.

    >>> __a(25)
    3
    >>> __a(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
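

# Cross-check of the same count without the string round-trip, using Brian Kernighan's
# trick: n & (n - 1) clears the lowest set bit. Illustrative sketch only; the helper
# name below is not part of the original file.
def _set_bits_count_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # drop the lowest set bit
        count += 1
    return count


if __name__ == "__main__":
    assert _set_bits_count_kernighan(25) == __a(25) == 3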
| 668
|
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = ["pixel_values"]
def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : Union[int, float] = 1 / 2_5_5 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Optional[Any] , ) -> None:
super().__init__(**__snake_case )
__magic_name__: Optional[Any] = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
__magic_name__: Dict = get_size_dict(__snake_case )
__magic_name__: Tuple = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__magic_name__: Optional[Any] = get_size_dict(__snake_case , param_name="""crop_size""" )
__magic_name__: Any = do_resize
__magic_name__: Optional[Any] = size
__magic_name__: Union[str, Any] = resample
__magic_name__: List[Any] = do_center_crop
__magic_name__: Union[str, Any] = crop_size
__magic_name__: int = do_rescale
__magic_name__: Optional[Any] = rescale_factor
__magic_name__: Any = do_normalize
__magic_name__: List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__: int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PIL.Image.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : int , ) -> np.ndarray:
__magic_name__: List[str] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
__snake_case , size=(size["""height"""], size["""width"""]) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ) -> np.ndarray:
__magic_name__: Optional[Any] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Any , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ) -> Any:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[Any] , ) -> np.ndarray:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Tuple , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Dict=None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[Any] , ) -> PIL.Image.Image:
__magic_name__: Optional[int] = do_resize if do_resize is not None else self.do_resize
__magic_name__: Dict = resample if resample is not None else self.resample
__magic_name__: Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__: Any = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__: str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__: Any = image_mean if image_mean is not None else self.image_mean
__magic_name__: Dict = image_std if image_std is not None else self.image_std
__magic_name__: Optional[Any] = size if size is not None else self.size
__magic_name__: List[str] = get_size_dict(__snake_case )
__magic_name__: int = crop_size if crop_size is not None else self.crop_size
__magic_name__: str = get_size_dict(__snake_case , param_name="""crop_size""" )
__magic_name__: Optional[Any] = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__magic_name__: Optional[Any] = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
__magic_name__: str = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
__magic_name__: List[str] = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
__magic_name__: Union[str, Any] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
__magic_name__: Optional[int] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
__magic_name__: List[str] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
__magic_name__: Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 96
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end) -> int:
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
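# A compact, readable sketch of the same randomized in-place quicksort with
# comparison counting (function and variable names here are illustrative):
from random import randint
def quick_sort_count(a, start, end):
    """Sort a[start:end+1] in place and return the number of comparisons made."""
    count = 0
    if start < end:
        p, count = partition_count(a, start, end)
        count += quick_sort_count(a, start, p - 1)
        count += quick_sort_count(a, p + 1, end)
    return count
def partition_count(a, start, end):
    """Lomuto-style partition around a random pivot; returns (pivot_index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]          # move the pivot value to the end
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:                    # compare against the pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
# Example: quick_sort_count([3, 1, 2], 0, 2) sorts the list and returns the comparison count.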
| 678
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 100 , ) -> float:
lowerCAmelCase__ : int = x_start
lowerCAmelCase__ : Tuple = fnc(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = 0.0
for _ in range(UpperCamelCase ):
# Approximates curve as a sequence of linear lines and sums their length
lowerCAmelCase__ : Optional[int] = (x_end - x_start) / steps + xa
lowerCAmelCase__ : Tuple = fnc(UpperCamelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowerCAmelCase__ : Optional[int] = xa
lowerCAmelCase__ : List[str] = fxa
return length
if __name__ == "__main__":
    def __lowerCAmelCase ( x ) -> float:
        return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowerCAmelCase_ = 10
while i <= 10_00_00:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
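# The loop above approximates arc length by summing the lengths of straight
# segments. A tiny self-contained restatement (illustrative name) makes the idea
# easy to sanity-check against a known value: for f(x) = x the length from 0 to 1
# is sqrt(2).
import math
def line_length_sketch(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        xb = (x_end - x_start) / steps + xa      # next sample point
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa) # length of the linear segment
        xa, fxa = xb, fxb
    return length
# line_length_sketch(lambda x: x, 0, 1) is approximately math.sqrt(2)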
| 700
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( module ) -> None:
    for param in module.parameters():
        param.requires_grad = False
def __lowerCAmelCase ( ) -> Optional[Any]:
lowerCAmelCase__ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ : Optional[Any] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]:
    fig = plt.imshow(UpperCamelCase )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
plt.show()
def __lowerCAmelCase ( ) -> str:
lowerCAmelCase__ : Dict = datetime.now()
lowerCAmelCase__ : Optional[int] = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 470
| 0
|
'''simple docstring'''
def A_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = word.split()
def justify(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Tuple = max_width - width
__SCREAMING_SNAKE_CASE : Optional[Any] = len(UpperCamelCase__ )
if len(UpperCamelCase__ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__SCREAMING_SNAKE_CASE : Any = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__SCREAMING_SNAKE_CASE : int = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__SCREAMING_SNAKE_CASE : List[str] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase__ ):
num_spaces_between_words_list[i] += 1
__SCREAMING_SNAKE_CASE : Tuple = []
for i in range(UpperCamelCase__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : list[str] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
for word in words:
if width + len(UpperCamelCase__ ) + len(UpperCamelCase__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase__ )
width += len(UpperCamelCase__ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
# reset new line and new width
__SCREAMING_SNAKE_CASE : Union[str, Any] = [word], len(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = max_width - width - len(UpperCamelCase__ )
answer.append(''' '''.join(UpperCamelCase__ ) + (remaining_spaces + 1) * ''' ''' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
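# A small self-contained restatement of the space-distribution step used by the
# inner justify() above: leftover spaces are split across the gaps, with the extra
# spaces handed to the leftmost gaps. The helper name is illustrative.
def justify_line_sketch(line: list, width: int, max_width: int) -> str:
    """Pad `line` (words whose lengths sum to `width`) out to exactly `max_width`."""
    if len(line) == 1:
        return line[0] + " " * (max_width - width)
    gaps = len(line) - 1
    space, extra = divmod(max_width - width, gaps)   # extra spaces go to the leftmost gaps
    pieces = []
    for i, word in enumerate(line[:-1]):
        pieces.append(word)
        pieces.append(" " * (space + (1 if i < extra else 0)))
    pieces.append(line[-1])
    return "".join(pieces)
# Example: justify_line_sketch(["a", "b", "c"], 3, 8) == "a   b  c"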
| 158
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = '''rwkv'''
UpperCamelCase : Optional[int] = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int]=50277 , UpperCAmelCase__ : List[Any]=1024 , UpperCAmelCase__ : Dict=4096 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=True , **UpperCAmelCase__ : List[Any] , ) -> Optional[int]:
_a : Dict = vocab_size
_a : List[Any] = context_length
_a : int = hidden_size
_a : Dict = num_hidden_layers
_a : List[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
_a : Any = intermediate_size if intermediate_size is not None else 4 * hidden_size
_a : int = layer_norm_epsilon
_a : List[str] = rescale_every
_a : List[str] = use_cache
_a : List[str] = bos_token_id
_a : Optional[int] = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
| 389
| 0
|
"""simple docstring"""
from torch import nn
def lowerCAmelCase__ ( act_fn ) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 706
|
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCAmelCase__ ( lowerCamelCase__ ) -> Union[str, Any]:
A = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError('Quantized models are not supported.' )
A = re.match(R'^mobilenet_v1_([^_]*)_([^_]*)$' , lowerCamelCase__ )
if matches:
A = float(matches[1] )
A = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
A = 1001
A = 'imagenet-1k-id2label.json'
A = 'huggingface/label-files'
A = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' ) , 'r' ) )
    A = {int(k ) + 1: v for k, v in idalabel.items()}
A = 'background'
A = idalabel
A = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( ) -> List[Any]:
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Dict:
A = get_mobilenet_va_config(lowerCamelCase__ )
# Load 🤗 model
A = MobileNetVaForImageClassification(lowerCamelCase__ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
A = MobileNetVaImageProcessor(
crop_size={'width': config.image_size, 'height': config.image_size} , size={'shortest_edge': config.image_size + 32} , )
A = image_processor(images=prepare_img() , return_tensors='pt' )
A = model(**lowerCamelCase__ )
A = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
A = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
A = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
A = 'google/' + model_name
image_processor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
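# Hypothetical command-line invocation of the conversion script above (the script
# file name and checkpoint path are assumptions; the flags match the argparse
# definitions in this file):
#
#   python convert_mobilenet_v1_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224 \
#       --push_to_hub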
| 109
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ChineseCLIPFeatureExtractor']
a_ = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 296
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
a_ = TypeVar('_T')
class __SCREAMING_SNAKE_CASE ( Generic[_T] ):
def __init__( self : Union[str, Any] , __lowercase : Iterable[_T] | None = None ) -> None:
SCREAMING_SNAKE_CASE__ : list[_T] =list(iterable or [] )
SCREAMING_SNAKE_CASE__ : list[_T] =[]
def __len__( self : Union[str, Any] ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Dict ) -> str:
return F"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def __magic_name__ ( self : str , __lowercase : _T ) -> None:
self._stacka.append(__lowercase )
def __magic_name__ ( self : List[Any] ) -> _T:
SCREAMING_SNAKE_CASE__ : Optional[int] =self._stacka.pop
SCREAMING_SNAKE_CASE__ : Optional[Any] =self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
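# A minimal sketch of the same two-stack queue idea, with the inbox and outbox
# stacks named explicitly (class and attribute names here are illustrative):
class TwoStackQueue:
    """FIFO queue built from two LIFO stacks; amortized O(1) enqueue/dequeue."""
    def __init__(self):
        self._inbox, self._outbox = [], []
    def put(self, item):
        self._inbox.append(item)
    def get(self):
        if not self._outbox:
            # Move everything over, reversing order so the oldest item ends up on top.
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()
# Example: q = TwoStackQueue(); q.put(1); q.put(2); q.get() == 1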
| 296
| 1
|
'''simple docstring'''
import os
from pathlib import Path
def lowercase ( ):
'''simple docstring'''
from torch.utils.cpp_extension import load
    UpperCAmelCase : Optional[Any] = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
UpperCAmelCase : str = [
root / filename
for filename in [
"vision.cpp",
os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
]
]
load(
"MultiScaleDeformableAttention" , __magic_name__ , with_cuda=__magic_name__ , extra_include_paths=[str(__magic_name__ )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 609
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str
SCREAMING_SNAKE_CASE__ : int
def lowercase ( __magic_name__ ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__magic_name__ ) )]
def lowercase ( __magic_name__ ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
UpperCAmelCase : List[Any] = all_rotations(__magic_name__ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
UpperCAmelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__magic_name__ ),
}
return response
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
UpperCAmelCase : Optional[int] = int(__magic_name__ )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__magic_name__ ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
UpperCAmelCase : str = [""] * len(__magic_name__ )
for _ in range(len(__magic_name__ ) ):
for i in range(len(__magic_name__ ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
a : Tuple = "Provide a string that I will generate its BWT transform: "
a : str = input(entry_msg).strip()
a : Tuple = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
a : Any = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
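# A compact restatement of the forward transform above (the reverse transform
# follows the same O(n^2) table reconstruction). The function name is illustrative.
def bwt_sketch(s: str) -> tuple:
    """Return (last column of the sorted rotations, index of the original string)."""
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(row[-1] for row in rotations), rotations.index(s)
# Example: bwt_sketch("banana") == ("nnbaaa", 3)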
| 609
| 1
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def run_func(SCREAMING_SNAKE_CASE ):
@wraps(SCREAMING_SNAKE_CASE )
def run_in_eager_mode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
return func(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@wraps(SCREAMING_SNAKE_CASE )
@tf.function(experimental_compile=SCREAMING_SNAKE_CASE )
def run_in_graph_mode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
return func(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = random.Random()
UpperCamelCase : Tuple = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(SCREAMING_SNAKE_CASE , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase : TensorFlowBenchmarkArguments
__lowerCAmelCase : PretrainedConfig
__lowerCAmelCase : str = "TensorFlow"
@property
def _a ( self ):
'''simple docstring'''
return tf.__version__
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase : Optional[int] = self._prepare_inference_func(_A , _A , _A )
return self._measure_speed(_inference )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Dict = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase : str = self._prepare_train_func(_A , _A , _A )
return self._measure_speed(_train )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _A )
UpperCamelCase : int = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase : Dict = self._prepare_inference_func(_A , _A , _A )
return self._measure_memory(_inference )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _A )
UpperCamelCase : List[str] = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
UpperCamelCase : Any = self._prepare_train_func(_A , _A , _A )
return self._measure_memory(_train )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : List[str] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCamelCase : Optional[int] = (
hasattr(_A , """architectures""" )
and isinstance(config.architectures , _A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase : Optional[int] = __import__("""transformers""" , fromlist=[model_class] )
UpperCamelCase : str = getattr(_A , _A )
UpperCamelCase : Dict = model_cls(_A )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCamelCase : Optional[int] = TF_MODEL_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
UpperCamelCase : Optional[Any] = config.vocab_size if hasattr(_A , """vocab_size""" ) else config.encoder.vocab_size
UpperCamelCase : Dict = random_input_ids(_A , _A , _A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_A , decoder_input_ids=_A , training=_A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_A , training=_A )
UpperCamelCase : str = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _a ( self , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase : Tuple = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
UpperCamelCase : int = (
hasattr(_A , """architectures""" )
and isinstance(config.architectures , _A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCamelCase : Optional[int] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCamelCase : Any = __import__("""transformers""" , fromlist=[model_class] )
UpperCamelCase : Optional[int] = getattr(_A , _A )
UpperCamelCase : Union[str, Any] = model_cls(_A )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
UpperCamelCase : str = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_A )
# encoder-decoder has vocab size saved differently
UpperCamelCase : str = config.vocab_size if hasattr(_A , """vocab_size""" ) else config.encoder.vocab_size
UpperCamelCase : Union[str, Any] = random_input_ids(_A , _A , _A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
UpperCamelCase : str = model(_A , decoder_input_ids=_A , labels=_A , training=_A )[0]
UpperCamelCase : Any = tf.gradients(_A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
UpperCamelCase : List[str] = model(_A , labels=_A , training=_A )[0]
UpperCamelCase : int = tf.gradients(_A , model.trainable_variables )
return gradients
UpperCamelCase : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _a ( self , _A ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(_A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCamelCase : int = timeit.repeat(
_A , repeat=self.args.repeat , number=1_0 , )
return min(_A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def _a ( self , _A ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
UpperCamelCase : List[str] = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
UpperCamelCase : str = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
UpperCamelCase : Dict = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
UpperCamelCase : List[str] = nvml.nvmlDeviceGetMemoryInfo(_A )
UpperCamelCase : Union[str, Any] = meminfo.used
UpperCamelCase : Tuple = Memory(_A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
UpperCamelCase : Dict = None
else:
UpperCamelCase : List[Any] = measure_peak_memory_cpu(_A )
UpperCamelCase : Dict = Memory(_A ) if isinstance(_A , _A ) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCamelCase : List[str] = stop_memory_tracing(_A )
if memory is None:
UpperCamelCase : str = summary.total
else:
UpperCamelCase : Tuple = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 102
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Optional[Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = """instructblip_vision_model"""
def __init__( self : List[Any] , _A : Dict=1408 , _A : Union[str, Any]=6144 , _A : Optional[int]=39 , _A : Optional[int]=16 , _A : Optional[int]=224 , _A : Any=14 , _A : Optional[int]="gelu" , _A : str=1E-6 , _A : str=0.0 , _A : str=1E-10 , _A : Optional[Any]=True , **_A : List[Any] , ) -> Dict:
super().__init__(**_A )
__magic_name__ : Optional[int] = hidden_size
__magic_name__ : int = intermediate_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : Any = patch_size
__magic_name__ : Tuple = image_size
__magic_name__ : int = initializer_range
__magic_name__ : str = attention_dropout
__magic_name__ : int = layer_norm_eps
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Tuple = qkv_bias
@classmethod
def __lowerCAmelCase ( cls : List[Any] , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_A )
__magic_name__ , __magic_name__ : Union[str, Any] = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = """instructblip_qformer"""
def __init__( self : Dict , _A : Dict=30522 , _A : List[str]=768 , _A : Tuple=12 , _A : List[Any]=12 , _A : Optional[int]=3072 , _A : Optional[Any]="gelu" , _A : Tuple=0.1 , _A : Any=0.1 , _A : int=512 , _A : Tuple=0.02 , _A : Optional[Any]=1E-12 , _A : List[Any]=0 , _A : Tuple="absolute" , _A : Dict=2 , _A : Tuple=1408 , **_A : int , ) -> Optional[int]:
super().__init__(pad_token_id=_A , **_A )
__magic_name__ : Any = vocab_size
__magic_name__ : str = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : str = hidden_act
__magic_name__ : List[str] = intermediate_size
__magic_name__ : List[str] = hidden_dropout_prob
__magic_name__ : Tuple = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : Union[str, Any] = position_embedding_type
__magic_name__ : Any = cross_attention_frequency
__magic_name__ : int = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls : int , _A : Union[str, os.PathLike] , **_A : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_A )
__magic_name__ , __magic_name__ : str = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__magic_name__ : Union[str, Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_A , **_A )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : int = """instructblip"""
A_ : Any = True
def __init__( self : int , _A : Optional[int]=None , _A : List[str]=None , _A : Union[str, Any]=None , _A : Any=32 , **_A : int ) -> Any:
super().__init__(**_A )
if vision_config is None:
__magic_name__ : Any = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__magic_name__ : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__magic_name__ : List[str] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__magic_name__ : Union[str, Any] = InstructBlipVisionConfig(**_A )
__magic_name__ : str = InstructBlipQFormerConfig(**_A )
__magic_name__ : int = text_config['model_type'] if 'model_type' in text_config else 'opt'
__magic_name__ : Tuple = CONFIG_MAPPING[text_model_type](**_A )
__magic_name__ : Optional[Any] = self.text_config.tie_word_embeddings
__magic_name__ : int = self.text_config.is_encoder_decoder
__magic_name__ : List[Any] = num_query_tokens
__magic_name__ : Tuple = self.vision_config.hidden_size
__magic_name__ : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__magic_name__ : int = 1.0
__magic_name__ : List[Any] = 0.02
@classmethod
def __lowerCAmelCase ( cls : str , _A : InstructBlipVisionConfig , _A : InstructBlipQFormerConfig , _A : PretrainedConfig , **_A : int , ) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : int = copy.deepcopy(self.__dict__ )
__magic_name__ : str = self.vision_config.to_dict()
__magic_name__ : List[str] = self.qformer_config.to_dict()
__magic_name__ : Tuple = self.text_config.to_dict()
__magic_name__ : str = self.__class__.model_type
return output
| 561
| 0
|
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class a__ ( __SCREAMING_SNAKE_CASE ):
def __get__( self : List[Any] , A_ : Tuple , A_ : Tuple=None ) -> int:
"""simple docstring"""
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
lowerCamelCase_: Optional[Any] = """__cached_""" + self.fget.__name__
lowerCamelCase_: Union[str, Any] = getattr(A_ , A_ , A_ )
if cached is None:
lowerCamelCase_: str = self.fget(A_ )
setattr(A_ , A_ , A_ )
return cached
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: List[Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def UpperCAmelCase_ ( _UpperCAmelCase ):
if is_torch_fx_proxy(_UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(_UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_UpperCAmelCase , np.ndarray )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return isinstance(_UpperCAmelCase , np.ndarray )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return _is_numpy(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import torch
return isinstance(_UpperCAmelCase , torch.Tensor )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import torch
return isinstance(_UpperCAmelCase , torch.device )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch_device(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import torch
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase_: Optional[Any] = getattr(_UpperCAmelCase , _UpperCAmelCase )
else:
return False
return isinstance(_UpperCAmelCase , torch.dtype )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_torch_available() else _is_torch_dtype(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import tensorflow as tf
return isinstance(_UpperCAmelCase , tf.Tensor )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_tf_available() else _is_tensorflow(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_UpperCAmelCase , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(_UpperCAmelCase )
return type(_UpperCAmelCase ) == tf.Tensor
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(_UpperCAmelCase , jnp.ndarray )
def UpperCAmelCase_ ( _UpperCAmelCase ):
return False if not is_flax_available() else _is_jax(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return [to_py_obj(_UpperCAmelCase ) for o in obj]
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase ).tolist()
elif isinstance(_UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def UpperCAmelCase_ ( _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(_UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return np.array(_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(_UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_UpperCAmelCase ):
return np.asarray(_UpperCAmelCase )
else:
return obj
class a__ ( __SCREAMING_SNAKE_CASE ):
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_: List[Any] = fields(self )
# Safety and consistency checks
if not len(A_ ):
raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
lowerCamelCase_: Optional[Any] = getattr(self , class_fields[0].name )
lowerCamelCase_: List[str] = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(A_ ):
if isinstance(A_ , A_ ):
lowerCamelCase_: List[str] = first_field.items()
lowerCamelCase_: List[str] = True
else:
try:
lowerCamelCase_: Any = iter(A_ )
lowerCamelCase_: Dict = True
except TypeError:
lowerCamelCase_: Union[str, Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A_ ):
if (
not isinstance(A_ , (list, tuple) )
or not len(A_ ) == 2
or not isinstance(element[0] , A_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCamelCase_: Tuple = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCamelCase_: int = element[1]
elif first_field is not None:
lowerCamelCase_: Dict = first_field
else:
for field in class_fields:
lowerCamelCase_: Tuple = getattr(self , field.name )
if v is not None:
lowerCamelCase_: int = v
def __delitem__( self : str , *A_ : List[str] , **A_ : int ) -> List[Any]:
"""simple docstring"""
raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def lowerCAmelCase ( self : Optional[Any] , *A_ : Tuple , **A_ : Union[str, Any] ) -> str:
"""simple docstring"""
raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def lowerCAmelCase ( self : List[Any] , *A_ : Any , **A_ : str ) -> str:
"""simple docstring"""
raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def lowerCAmelCase ( self : List[str] , *A_ : Optional[int] , **A_ : Optional[int] ) -> Any:
"""simple docstring"""
raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Optional[Any] , A_ : Tuple ) -> str:
"""simple docstring"""
if isinstance(A_ , A_ ):
lowerCamelCase_: Union[str, Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : List[Any] , A_ : List[Any] , A_ : str ) -> Any:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A_ , A_ )
super().__setattr__(A_ , A_ )
def __setitem__( self : int , A_ : Any , A_ : Tuple ) -> Dict:
"""simple docstring"""
super().__setitem__(A_ , A_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A_ , A_ )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@classmethod
def lowerCAmelCase ( cls : Tuple , A_ : List[str] ) -> Optional[int]:
"""simple docstring"""
raise ValueError(
f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "longest"
_A = "max_length"
_A = "do_not_pad"
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "pt"
_A = "tf"
_A = "np"
_A = "jax"
class a__ :
def __init__( self : List[Any] , A_ : List[ContextManager] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_: Tuple = context_managers
lowerCamelCase_: Any = ExitStack()
def __enter__( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(A_ )
def __exit__( self : Union[str, Any] , *A_ : Dict , **A_ : List[str] ) -> List[str]:
"""simple docstring"""
self.stack.__exit__(*A_ , **A_ )
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: List[Any] = infer_framework(_UpperCAmelCase )
if framework == "tf":
lowerCamelCase_: Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase_: List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase_: Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: Tuple = model_class.__name__
lowerCamelCase_: Optional[Any] = infer_framework(_UpperCAmelCase )
if framework == "tf":
lowerCamelCase_: str = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase_: Tuple = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase_: int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = "" , _UpperCAmelCase = "." ):
def _flatten_dict(_UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase="." ):
for k, v in d.items():
lowerCamelCase_: Optional[int] = str(_UpperCAmelCase ) + delimiter + str(_UpperCAmelCase ) if parent_key else k
if v and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
yield from flatten_dict(_UpperCAmelCase , _UpperCAmelCase , delimiter=_UpperCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
@contextmanager
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=None ):
if is_numpy_array(_UpperCAmelCase ):
return np.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.T if axes is None else array.permute(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.transpose(_UpperCAmelCase , perm=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.transpose(_UpperCAmelCase , axes=_UpperCAmelCase )
else:
raise ValueError(f"""Type not supported for transpose: {type(_UpperCAmelCase )}.""" )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.reshape(*_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.reshape(_UpperCAmelCase , _UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.reshape(_UpperCAmelCase , _UpperCAmelCase )
else:
raise ValueError(f"""Type not supported for reshape: {type(_UpperCAmelCase )}.""" )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase=None ):
if is_numpy_array(_UpperCAmelCase ):
return np.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.squeeze(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(f"""Type not supported for squeeze: {type(_UpperCAmelCase )}.""" )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.expand_dims(_UpperCAmelCase , _UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.unsqueeze(dim=_UpperCAmelCase )
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return jnp.expand_dims(_UpperCAmelCase , axis=_UpperCAmelCase )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(_UpperCAmelCase )}.""" )
def UpperCAmelCase_ ( _UpperCAmelCase ):
if is_numpy_array(_UpperCAmelCase ):
return np.size(_UpperCAmelCase )
elif is_torch_tensor(_UpperCAmelCase ):
return array.numel()
elif is_tf_tensor(_UpperCAmelCase ):
import tensorflow as tf
return tf.size(_UpperCAmelCase )
elif is_jax_tensor(_UpperCAmelCase ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(_UpperCAmelCase )}.""" )
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
for key, value in auto_map.items():
if isinstance(_UpperCAmelCase , (tuple, list) ):
lowerCamelCase_: Any = [f"""{repo_id}--{v}""" if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCamelCase_: Union[str, Any] = f"""{repo_id}--{value}"""
return auto_map
def UpperCAmelCase_ ( _UpperCAmelCase ):
for base_class in inspect.getmro(_UpperCAmelCase ):
lowerCamelCase_: List[str] = base_class.__module__
lowerCamelCase_: Optional[Any] = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 717
|
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: List[str] = len(_UpperCAmelCase )
for i in range(1 , _UpperCAmelCase ):
lowerCamelCase_: str = collection[i]
lowerCamelCase_: str = 0
lowerCamelCase_: Optional[Any] = i - 1
while low <= high:
lowerCamelCase_: str = (low + high) // 2
if val < collection[mid]:
lowerCamelCase_: str = mid - 1
else:
lowerCamelCase_: Optional[int] = mid + 1
for j in range(_UpperCAmelCase , _UpperCAmelCase , -1 ):
lowerCamelCase_: Optional[int] = collection[j - 1]
lowerCamelCase_: Optional[Any] = val
return collection
if __name__ == "__main__":
lowercase : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
lowercase : Dict = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
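# A self-contained sketch of the same binary insertion sort, with the binary-search
# bounds and the right-shift of the block written out explicitly (illustrative name):
def binary_insertion_sort_sketch(collection: list) -> list:
    """Sort in place: binary-search the insert position, then shift elements right."""
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):        # shift the block [low, i) one step to the right
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
# Example: binary_insertion_sort_sketch([5, 2, 4, 1]) == [1, 2, 4, 5]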
| 584
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(A ) , 'Tatoeba directory does not exist.' )
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A )
@slow
def a__ (self ) -> Dict:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def a__ (self ) -> Tuple:
"""simple docstring"""
_a , _a = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A )
assert mmeta["long_pair"] == "heb-eng"
| 11
|
import numpy as np
def sigmoid ( vector: np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit ( vector: np.ndarray ) -> np.ndarray:
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
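# Quick numeric check of the SiLU idea above (SiLU(x) = x * sigmoid(x)), kept
# self-contained so it does not depend on the names defined in this file:
import numpy as np
_x = np.array([-1.0, 0.0, 1.0])
_silu = _x * (1 / (1 + np.exp(-_x)))
# _silu is approximately [-0.2689, 0.0, 0.7311]; note that SiLU(0) == 0 exactly.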
| 33
| 0
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase : str = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
return torch.atana(lowercase ,lowercase ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : Tuple = torch.sin(t * math.pi / 2 ) ** 2
snake_case : Tuple = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase ,lowercase )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A ) -> Any:
super().__init__()
snake_case : Optional[int] = DiffusionAttnUnetaD(A , n_attn_layers=4 )
snake_case : Tuple = deepcopy(self.diffusion )
snake_case : List[Any] = torch.quasirandom.SobolEngine(1 , scramble=A )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : str = MODELS_MAP[model_name]["""url"""]
os.system(f"""wget {url} ./""" )
return f"""./{model_name}.ckpt"""
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
lowerCamelCase : Optional[Any] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
lowerCamelCase : Optional[Any] = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
lowerCamelCase : int = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
lowerCamelCase : Dict = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if name.startswith("""skip""" ):
return name.replace("""skip""" ,RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] ,RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
for key, value in ATTN_MAP.items():
if name.startswith(lowercase ) and not isinstance(lowercase ,lowercase ):
return name.replace(lowercase ,lowercase )
elif name.startswith(lowercase ):
return [name.replace(lowercase ,lowercase ) for v in value]
raise ValueError(f"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=13 ) -> Any:
snake_case : List[Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" ,"""time_proj""" )
snake_case : Optional[Any] = 0
if string.startswith("""net.3.""" ):
depth += 1
snake_case : Union[str, Any] = string[6:]
elif string.startswith("""net.""" ):
snake_case : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
snake_case : List[Any] = string[7:]
if string.startswith("""main.""" ):
snake_case : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
snake_case : List[str] = string[:2]
snake_case : str = string[2:]
else:
snake_case : Optional[Any] = string[0]
snake_case : str = string[1:]
if depth == max_depth:
snake_case : str = MID_NUM_TO_LAYER[layer_num]
snake_case : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowercase ) < 7:
snake_case : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
snake_case : List[Any] = f"""down_blocks.{depth}"""
elif depth > 0 and int(lowercase ) > 7:
snake_case : Optional[int] = UP_NUM_TO_LAYER[layer_num]
snake_case : Dict = f"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
snake_case : Tuple = DEPTH_0_TO_LAYER[layer_num]
snake_case : Union[str, Any] = f"""up_blocks.{max_depth - 1}""" if int(lowercase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
snake_case : Any = string_left[1:]
if "resnets" in new_layer:
snake_case : Any = convert_resconv_naming(lowercase )
elif "attentions" in new_layer:
snake_case : int = convert_attn_naming(lowercase )
snake_case : Optional[Any] = new_string_left
if not isinstance(lowercase ,lowercase ):
snake_case : Optional[Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
snake_case : Union[str, Any] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
snake_case : List[str] = rename(lowercase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase ,lowercase ):
snake_case : Any = transform_conv_attns(lowercase ,lowercase ,lowercase )
else:
snake_case : Optional[Any] = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if len(lowercase ) == 1:
if len(v.shape ) == 3:
# weight
snake_case : int = v[:, :, 0]
else:
# bias
snake_case : Any = v
else:
# qkv matrices
snake_case : str = v.shape[0]
snake_case : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case : Tuple = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase : List[str] = parser.parse_args()
main(args)
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def primitive_root(p_val) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
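# Illustrative sketch (not part of the original module): the keys produced above satisfy
# e_2 * pow(e_1, d, p) % p == 1, since e_2 is the modular inverse of pow(e_1, d, p).
# The tiny numbers and the helper name below are made up for illustration; real keys use
# a large prime generated by rabin_miller.
def _demo_key_relation():
    p, g, d = 23, 5, 7  # toy prime, base and "private" exponent
    shared = pow(g, d, p)
    inverse = pow(shared, -1, p)  # modular inverse (Python 3.8+)
    assert (shared * inverse) % p == 1
    return shared, inverse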
def make_key_files(name, key_size) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print from multiple processes without interleaving, using an exclusive file lock."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_UpperCamelCase : Any = False
class UpperCAmelCase_ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : int = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ : Tuple = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Any = generator.manual_seed(0 )
lowercase__ : List[Any] = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ : Optional[int] = 'cyberpunk 2077'
lowercase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : List[Any] = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
lowercase__ : Any = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : List[str] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : Any = 'A painting of a squirrel eating a burger '
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : str = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
lowercase__ : Optional[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : Optional[int] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : List[Any] = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase__ : Optional[int] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _a ( _lowercase : int ):
'''simple docstring'''
def is_in_circle(_lowercase : float , _lowercase : float ) -> bool:
__UpperCAmelCase : str = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__UpperCAmelCase : Any = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_lowercase ) )
# The ratio of the area for circle to square is pi/4.
__UpperCAmelCase : List[Any] = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0) -> float:
    """Monte Carlo estimate of the area under `function_to_integrate` on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
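# Self-contained sketch (not part of the original module): a quick sanity check of the
# estimator idea above on f(x) = x**2 over [0, 1], whose exact integral is 1/3.  The
# helper name `_demo_quadratic_integral` is made up for illustration.
def _demo_quadratic_integral(samples: int = 100_000) -> float:
    return mean(uniform(0.0, 1.0) ** 2 for _ in range(samples))  # ~0.333 for large `samples`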
def _a ( _lowercase : int , _lowercase : float = 0.0 , _lowercase : float = 1.0 ):
'''simple docstring'''
def identity_function(_lowercase : float ) -> float:
return x
__UpperCAmelCase : Tuple = area_under_curve_estimator(
_lowercase , _lowercase , _lowercase , _lowercase )
__UpperCAmelCase : Any = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('''******************''' )
def _a ( _lowercase : int ):
'''simple docstring'''
def function_to_integrate(_lowercase : float ) -> float:
return sqrt(4.0 - x * x )
__UpperCAmelCase : Union[str, Any] = area_under_curve_estimator(
_lowercase , _lowercase , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase :List[Any] = logging.get_logger(__name__)
def _a ( _lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''huggingface/label-files'''
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(_lowercase ): v for k, v in idalabel.items()}
__UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__UpperCAmelCase : int = BitConfig(
conv_layer=_lowercase , num_labels=1000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def _a ( _lowercase : Tuple ):
'''simple docstring'''
if "stem.conv" in name:
__UpperCAmelCase : Any = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
__UpperCAmelCase : Optional[int] = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
__UpperCAmelCase : Dict = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
__UpperCAmelCase : List[Any] = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
__UpperCAmelCase : List[str] = '''bit.encoder.''' + name
return name
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def _a ( _lowercase : Tuple , _lowercase : Dict , _lowercase : Tuple=False ):
'''simple docstring'''
__UpperCAmelCase : Any = get_config(_lowercase )
# load original model from timm
__UpperCAmelCase : Tuple = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
__UpperCAmelCase : List[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
__UpperCAmelCase : Optional[Any] = state_dict.pop(_lowercase )
__UpperCAmelCase : int = val.squeeze() if '''head''' in key else val
# load HuggingFace model
__UpperCAmelCase : Any = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
__UpperCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=_lowercase ) )
__UpperCAmelCase : Union[str, Any] = transform.transforms
__UpperCAmelCase : str = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
__UpperCAmelCase : List[Any] = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : Any = transform(_lowercase ).unsqueeze(0 )
__UpperCAmelCase : List[Any] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(_lowercase )
__UpperCAmelCase : Union[str, Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
__UpperCAmelCase : Optional[int] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(F'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(F'ybelkada/{model_name}' )
processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
__UpperCAmelCase :str = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[int] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_:Union[str, Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE_:Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE_:Any = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE_:Dict = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_:List[Any] = Path(tmpdirname)
SCREAMING_SNAKE_CASE_:str = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE_:Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE_:Any = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE_:Optional[int] = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE_:Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE_:Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE_:str = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_snake_case = False
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self , _UpperCAmelCase=32):
set_seed(0)
lowerCAmelCase_ = UNetaDModel(sample_size=_UpperCAmelCase , in_channels=3 , out_channels=3)
lowerCAmelCase_ = torch.optim.SGD(model.parameters() , lr=0.0001)
return model, optimizer
@slow
def lowercase__ ( self):
lowerCAmelCase_ = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowerCAmelCase_ = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=_UpperCAmelCase , )
lowerCAmelCase_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=_UpperCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
lowerCAmelCase_ = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(_UpperCAmelCase) for _ in range(4)]
lowerCAmelCase_ = [torch.randn((4, 3, 32, 32)).to(_UpperCAmelCase) for _ in range(4)]
lowerCAmelCase_ = [torch.randint(0 , 1_000 , (4,)).long().to(_UpperCAmelCase) for _ in range(4)]
# train with a DDPM scheduler
lowerCAmelCase_ , lowerCAmelCase_ = self.get_model_optimizer(resolution=32)
model.train().to(_UpperCAmelCase)
for i in range(4):
optimizer.zero_grad()
lowerCAmelCase_ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
lowerCAmelCase_ = model(_UpperCAmelCase , timesteps[i]).sample
lowerCAmelCase_ = torch.nn.functional.mse_loss(_UpperCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowerCAmelCase_ , lowerCAmelCase_ = self.get_model_optimizer(resolution=32)
model.train().to(_UpperCAmelCase)
for i in range(4):
optimizer.zero_grad()
lowerCAmelCase_ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
lowerCAmelCase_ = model(_UpperCAmelCase , timesteps[i]).sample
lowerCAmelCase_ = torch.nn.functional.mse_loss(_UpperCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5))
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5))
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every text/concept embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
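# Illustrative sketch (not part of the original module): after L2-normalisation the matrix
# product above yields cosine similarities, one row per image embedding and one column per
# concept embedding.  The toy tensors and the helper name below are made up.
def _demo_cosine_distance():
    image_embeds = torch.randn(2, 8)    # 2 "images", 8-dim embeddings
    concept_embeds = torch.randn(3, 8)  # 3 "concepts"
    sims = cosine_distance(image_embeds, concept_embeds)
    return sims.shape  # torch.Size([2, 3]); every entry lies in [-1, 1]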
class UpperCamelCase_ (PreTrainedModel):
    """CLIP-based safety checker that flags images matching unsafe concept embeddings."""
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCamelCase__ : Optional[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def UpperCAmelCase_ ( __UpperCAmelCase : List[str]=None ) -> Optional[int]:
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser('tpu-config' , description=_description )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
SCREAMING_SNAKE_CASE_ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__UpperCAmelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__UpperCAmelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
SCREAMING_SNAKE_CASE_ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__UpperCAmelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__UpperCAmelCase )
return parser
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
SCREAMING_SNAKE_CASE_ = defaults.command_file
if not args.command and defaults.commands is not None:
SCREAMING_SNAKE_CASE_ = defaults.commands
if not args.tpu_name:
SCREAMING_SNAKE_CASE_ = defaults.tpu_name
if not args.tpu_zone:
SCREAMING_SNAKE_CASE_ = defaults.tpu_zone
if args.accelerate_version == "dev":
SCREAMING_SNAKE_CASE_ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
SCREAMING_SNAKE_CASE_ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
SCREAMING_SNAKE_CASE_ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f"pip install {args.accelerate_version}"]
new_cmd += args.command
SCREAMING_SNAKE_CASE_ = '; '.join(__UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
SCREAMING_SNAKE_CASE_ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"Running {' '.join(__UpperCAmelCase )}" )
return
subprocess.run(__UpperCAmelCase )
print('Successfully setup pod.' )
def UpperCAmelCase_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tpu_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
tpu_command_launcher(__UpperCAmelCase )
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a list of frames, or a list of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
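# Illustrative sketch (not part of the original module): numpy arrays count as valid images,
# so a bare frame, a list of frames, or a list of clips all come back as a list of videos
# (each video being a list of frames).  The helper name and shapes below are made up.
def _demo_make_batched():
    frame = np.zeros((224, 224, 3), dtype=np.uint8)
    single = make_batched(frame)              # -> [[frame]]        : one video with one frame
    clip = make_batched([frame, frame])       # -> [[frame, frame]] : one video with two frames
    batch = make_batched([[frame], [frame]])  # -> unchanged        : two videos
    return len(single), len(clip), len(batch)  # (1, 1, 2)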
class a__ ( _a ):
lowercase_ = ["pixel_values"]
def __init__( self : Any , UpperCamelCase_ : List[Any] = True , UpperCamelCase_ : Tuple = None , UpperCamelCase_ : str = PILImageResampling.BILINEAR , UpperCamelCase_ : Any = True , UpperCamelCase_ : str = None , UpperCamelCase_ : Dict = True , UpperCamelCase_ : Any = 1 / 255 , UpperCamelCase_ : List[Any] = True , UpperCamelCase_ : Any = None , UpperCamelCase_ : List[Any] = None , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**_A)
__UpperCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 224}
__UpperCAmelCase : Optional[Any] = get_size_dict(_A , default_to_square=_A)
__UpperCAmelCase : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
__UpperCAmelCase : int = get_size_dict(_A , param_name="crop_size")
__UpperCAmelCase : List[str] = do_resize
__UpperCAmelCase : int = size
__UpperCAmelCase : Union[str, Any] = do_center_crop
__UpperCAmelCase : Optional[Any] = crop_size
__UpperCAmelCase : str = resample
__UpperCAmelCase : Optional[Any] = do_rescale
__UpperCAmelCase : Union[str, Any] = rescale_factor
__UpperCAmelCase : Union[str, Any] = do_normalize
__UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] = PILImageResampling.BILINEAR , UpperCamelCase_ : Union[str, Any] = None , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = get_size_dict(_A , default_to_square=_A)
if "shortest_edge" in size:
__UpperCAmelCase : Optional[Any] = get_resize_output_image_size(_A , size["shortest_edge"] , default_to_square=_A)
elif "height" in size and "width" in size:
__UpperCAmelCase : Dict = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
return resize(_A , size=_A , resample=_A , data_format=_A , **_A)
def a_ ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] = None , **UpperCamelCase_ : List[str] , ):
"""simple docstring"""
__UpperCAmelCase : List[str] = get_size_dict(_A)
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}")
return center_crop(_A , size=(size["height"], size["width"]) , data_format=_A , **_A)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] = None , **UpperCamelCase_ : Tuple , ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict = None , **UpperCamelCase_ : Tuple , ):
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A)
def a_ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : List[str] = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : List[str] = None , UpperCamelCase_ : Union[str, Any] = None , UpperCamelCase_ : Dict = None , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Optional[Any] = None , UpperCamelCase_ : Union[str, Any] = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
__UpperCAmelCase : Optional[int] = to_numpy_array(_A)
if do_resize:
__UpperCAmelCase : List[str] = self.resize(image=_A , size=_A , resample=_A)
if do_center_crop:
__UpperCAmelCase : Optional[int] = self.center_crop(_A , size=_A)
if do_rescale:
__UpperCAmelCase : Union[str, Any] = self.rescale(image=_A , scale=_A)
if do_normalize:
__UpperCAmelCase : int = self.normalize(image=_A , mean=_A , std=_A)
__UpperCAmelCase : Union[str, Any] = to_channel_dimension_format(_A , _A)
return image
def a_ ( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = None , UpperCamelCase_ : Union[str, Any] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Union[str, Any] = None , UpperCamelCase_ : str = None , UpperCamelCase_ : Any = None , UpperCamelCase_ : Tuple = None , UpperCamelCase_ : Tuple = None , UpperCamelCase_ : List[Any] = ChannelDimension.FIRST , **UpperCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Any = resample if resample is not None else self.resample
__UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
__UpperCAmelCase : str = size if size is not None else self.size
__UpperCAmelCase : str = get_size_dict(_A , default_to_square=_A)
__UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : List[str] = get_size_dict(_A , param_name="crop_size")
if not valid_images(_A):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
__UpperCAmelCase : Dict = make_batched(_A)
__UpperCAmelCase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
__UpperCAmelCase : List[str] = {"pixel_values": videos}
return BatchFeature(data=_A , tensor_type=_A)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class a__ ( __magic_name__ ):
lowercase_ = "gpt_bigcode"
lowercase_ = ["past_key_values"]
lowercase_ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , UpperCamelCase_ : Tuple=50257 , UpperCamelCase_ : Dict=1024 , UpperCamelCase_ : Optional[Any]=768 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="gelu_pytorch_tanh" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : str=50256 , UpperCamelCase_ : Union[str, Any]=50256 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Union[str, Any]=True , **UpperCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Optional[int] = n_positions
__UpperCAmelCase : Tuple = n_embd
__UpperCAmelCase : str = n_layer
__UpperCAmelCase : Dict = n_head
__UpperCAmelCase : Optional[Any] = n_inner
__UpperCAmelCase : Optional[Any] = activation_function
__UpperCAmelCase : List[str] = resid_pdrop
__UpperCAmelCase : List[Any] = embd_pdrop
__UpperCAmelCase : Optional[Any] = attn_pdrop
__UpperCAmelCase : Dict = layer_norm_epsilon
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : int = scale_attn_weights
__UpperCAmelCase : Tuple = use_cache
__UpperCAmelCase : List[Any] = attention_softmax_in_fpaa
__UpperCAmelCase : Any = scale_attention_softmax_in_fpaa
__UpperCAmelCase : str = multi_query
__UpperCAmelCase : int = bos_token_id
__UpperCAmelCase : str = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = '''▁'''
UpperCAmelCase_ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase_ = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
UpperCAmelCase_ = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
UpperCAmelCase_ = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class __UpperCamelCase ( A__ ):
__A : Optional[Any] = VOCAB_FILES_NAMES
__A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = ["input_ids", "attention_mask"]
__A : List[int] = []
__A : List[int] = []
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<unk>" , _UpperCamelCase="m2m100" , _UpperCamelCase = None , _UpperCamelCase=8 , **_UpperCamelCase , ):
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = language_codes
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
_UpperCAmelCase = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
_UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(UpperCAmelCase__ )
for lang_code in fairseq_language_code
if self.get_lang_token(UpperCAmelCase__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , language_codes=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=UpperCAmelCase__ , **UpperCAmelCase__ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = load_json(UpperCAmelCase__ )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = spm_file
_UpperCAmelCase = load_spm(UpperCAmelCase__ , self.sp_model_kwargs )
_UpperCAmelCase = len(self.encoder )
_UpperCAmelCase = {
self.get_lang_token(UpperCAmelCase__ ): self.encoder_size + i for i, lang_code in enumerate(UpperCAmelCase__ )
}
_UpperCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(UpperCAmelCase__ )}
_UpperCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
_UpperCAmelCase = src_lang if src_lang is not None else '''en'''
_UpperCAmelCase = tgt_lang
_UpperCAmelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_UpperCAmelCase = num_madeup_words
@property
def UpperCamelCase( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase( self ):
return self._src_lang
@src_lang.setter
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase( self , _UpperCamelCase ):
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def UpperCamelCase( self , _UpperCamelCase ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(UpperCAmelCase__ , self.encoder[self.unk_token] )
def UpperCamelCase( self , _UpperCamelCase ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(UpperCAmelCase__ , self.unk_token )
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
_UpperCAmelCase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + ([0] * len(UpperCAmelCase__ )) + suffix_ones
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = Path(UpperCAmelCase__ )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
_UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , UpperCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCAmelCase__ , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (str(UpperCAmelCase__ ), str(UpperCAmelCase__ ))
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = "en" , _UpperCamelCase = None , _UpperCamelCase = "ro" , **_UpperCamelCase , ):
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ )
_UpperCAmelCase = self.get_lang_id(UpperCAmelCase__ )
_UpperCAmelCase = tgt_lang_id
return inputs
def UpperCamelCase( self ):
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.get_lang_token(UpperCAmelCase__ )
_UpperCAmelCase = self.lang_token_to_id[lang_token]
_UpperCAmelCase = [self.cur_lang_id]
_UpperCAmelCase = [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.get_lang_token(UpperCAmelCase__ )
_UpperCAmelCase = self.lang_token_to_id[lang_token]
_UpperCAmelCase = [self.cur_lang_id]
_UpperCAmelCase = [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase ):
return self.lang_code_to_token[lang]
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = self.get_lang_token(UpperCAmelCase__ )
return self.lang_token_to_id[lang_token]
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_ )
spm.Load(str(lowerCAmelCase_ ) )
return spm
def A__ ( SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
"""simple docstring"""
with open(lowerCAmelCase_ , '''r''' ) as f:
return json.load(lowerCAmelCase_ )
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(lowerCAmelCase_ , '''w''' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2 )
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : str ) -> Any:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int:
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple:
__SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str:
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : int = True
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any]=False ) -> Dict:
__SCREAMING_SNAKE_CASE = super()._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
if return_labels:
if model_class in get_values(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__ )
return inputs_dict
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = NezhaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE = None
self.model_tester.create_and_check_model_as_decoder(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> int:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.jit.trace(
UpperCAmelCase__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , "bert.pt" ) )
__SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(UpperCAmelCase__ , "bert.pt" ) , map_location=UpperCAmelCase__ )
loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ) , inputs_dict["attention_mask"].to(UpperCAmelCase__ ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
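# A minimal sketch of the torch.jit.trace -> save -> load round-trip exercised by the
# torchscript test above. The tiny ToyModel below is an assumption made for illustration;
# the real test traces the Nezha classes on (input_ids, attention_mask).
import os
import tempfile

import torch


class ToyModel(torch.nn.Module):
    def __init__(self, vocab_size: int = 100, hidden: int = 8):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab_size, hidden)
        self.proj = torch.nn.Linear(hidden, 2)

    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        # Zero out padded positions before mean-pooling, then classify the pooled vector.
        hidden_states = self.embed(input_ids) * attention_mask.unsqueeze(-1)
        return self.proj(hidden_states.mean(dim=1))


if __name__ == "__main__":
    model = ToyModel().eval()
    input_ids = torch.randint(0, 100, (1, 6))
    attention_mask = torch.ones(1, 6)
    traced = torch.jit.trace(model, (input_ids, attention_mask))
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "toy.pt")
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path)
        # The reloaded trace should reproduce the eager outputs on the same inputs.
        assert torch.allclose(loaded(input_ids, attention_mask), model(input_ids, attention_mask))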
| 682
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader):
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =[self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCamelCase_ =[tuple(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else key for key in keys]
lowerCamelCase_ =Counter(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False )-> int:
lowerCamelCase_ =super().construct_mapping(_SCREAMING_SNAKE_CASE , deep=_SCREAMING_SNAKE_CASE )
self._check_no_duplicates_on_constructed_node(_SCREAMING_SNAKE_CASE )
return mapping
def _split_yaml_from_readme ( readme_content : str ) ->Tuple[Optional[str], str]:
    """simple docstring"""
    full_content =list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx =full_content[1:].index("""---""" ) + 1
        yamlblock ="""\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict):
# class attributes
_UpperCamelCase:str = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE )-> "DatasetMetadata":
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as readme_file:
lowerCamelCase_ , lowerCamelCase_ =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_SCREAMING_SNAKE_CASE )
else:
return cls()
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple:
if path.exists():
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as readme_file:
lowerCamelCase_ =readme_file.read()
else:
lowerCamelCase_ =None
lowerCamelCase_ =self._to_readme(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None )-> str:
if readme_content is not None:
lowerCamelCase_ , lowerCamelCase_ =_split_yaml_from_readme(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
lowerCamelCase_ ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def _snake_case ( cls , _SCREAMING_SNAKE_CASE )-> "DatasetMetadata":
lowerCamelCase_ =yaml.load(_SCREAMING_SNAKE_CASE , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCamelCase_ ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> str:
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE , encoding="""utf-8""" , ).decode("""utf-8""" )
__A : List[str] = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__A : Tuple = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
__A : Any = ap.parse_args()
__A : Dict = Path(args.readme_filepath)
__A : Dict = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
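# A self-contained sketch of the YAML front-matter handling used above: split the block
# between the leading "---" markers from the rest of a README and parse it with
# yaml.safe_load. The toy README below is made up for illustration.
import yaml

readme = """---
language: en
task_categories:
- text-classification
---
# My dataset
Some description.
"""

lines = readme.splitlines()
if lines and lines[0] == "---" and "---" in lines[1:]:
    sep_idx = lines[1:].index("---") + 1
    yaml_block = "\n".join(lines[1:sep_idx])
    body = "\n".join(lines[sep_idx + 1 :])
else:
    yaml_block, body = None, readme

metadata = yaml.safe_load(yaml_block) if yaml_block else {}
print(metadata)              # {'language': 'en', 'task_categories': ['text-classification']}
print(body.splitlines()[0])  # '# My dataset'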
| 75
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy ( preds : List[Any] , labels : Union[str, Any] ) ->Dict:
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_fa ( preds : Union[str, Any] , labels : Union[str, Any] , fa_avg : List[Any]="binary" ) ->List[Any]:
    """simple docstring"""
    acc =simple_accuracy(preds , labels )
    fa =float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc ( ids_preds : int , labels : Union[str, Any] ) ->int:
    """simple docstring"""
    question_map ={}
    for id_pred, label in zip(ids_preds , labels ):
        question_id =f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred =id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] =[(pred, label)]
    fas , ems =[], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels =zip(*preds_labels )
        fa =fa_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        fas.append(fa )
        em =int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m =float(sum(fas ) / len(fas ) )
    em =sum(ems ) / len(ems )
    fa_a =float(fa_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
def _snake_case ( self )-> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _snake_case ( self )-> Optional[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCamelCase_ =[
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCamelCase_ ={pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 75
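# A toy, self-contained check of the accuracy / F1 combination the "cb" configuration
# computes above (accuracy plus macro-averaged F1 over three classes). The values are
# made up for illustration; scikit-learn exposes the metric as f1_score.
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 2, 1, 0, 2])
labels = np.array([0, 1, 2, 0, 0, 1])
accuracy = float((preds == labels).mean())  # 4 of 6 correct, about 0.667
macro_f1 = float(f1_score(y_true=labels, y_pred=preds, average="macro"))
print({"accuracy": accuracy, "f1": macro_f1})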
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCAmelCase = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
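# The __init__ above defers heavy imports through an import structure handed to a lazy
# module helper. A minimal, library-agnostic sketch of the same idea using the PEP 562
# module-level __getattr__ hook; the submodule and class names below are placeholders,
# not the real package layout.
import importlib
from typing import Any

_IMPORT_STRUCTURE = {
    "configuration_longt5": ["LongT5Config"],
    "modeling_longt5": ["LongT5Model", "LongT5ForConditionalGeneration"],
}
_ATTR_TO_MODULE = {name: module for module, names in _IMPORT_STRUCTURE.items() for name in names}


def __getattr__(name: str) -> Any:
    # Import only the submodule that actually defines the requested attribute.
    module_name = _ATTR_TO_MODULE.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)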
| 65
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCAmelCase = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class __lowercase :
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = """gelu"""
def __init__( self : List[Any] ,A : int ,A : Optional[Any]=13 ,A : Dict=7 ,A : Dict=True ,A : Any=False ,A : Dict=99 ,A : int=32 ,A : Optional[int]=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : str=0.1 ,A : int=0.1 ,A : Optional[int]=20 ,A : Tuple=2 ,A : str=1 ,A : Optional[Any]=0 ,):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : Union[str, Any] = batch_size
UpperCAmelCase__ : List[Any] = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : Union[str, Any] = pad_token_id
UpperCAmelCase__ : List[str] = bos_token_id
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
UpperCAmelCase__ : List[str] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase__ : Any = np.concatenate([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : str = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase__ : Optional[Any] = prepare_pegasus_inputs_dict(A ,A ,A )
return config, inputs_dict
def __lowercase ( self : Any ,A : Optional[int] ,A : str ,A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : Dict = model_class_name(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : int = model.decode(
decoder_input_ids[:, -1:] ,A ,decoder_attention_mask=A ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=A ,)
UpperCAmelCase__ : Dict = model.decode(A ,A )
UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def __lowercase ( self : Optional[int] ,A : str ,A : Dict ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = 20
UpperCAmelCase__ : str = model_class_name(A )
UpperCAmelCase__ : Any = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase__ : Optional[int] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase__ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] ,A ,A )
UpperCAmelCase__ : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] ,A ,decoder_attention_mask=A ,past_key_values=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase__ : Dict = model.decode(
decoder_input_ids[:, -1:] ,A ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=A ,decoder_position_ids=A ,)
UpperCAmelCase__ : Union[str, Any] = model.decode(A ,A ,decoder_attention_mask=A )
UpperCAmelCase__ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = np.not_equal(__UpperCamelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Tuple = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class __lowercase ( __lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = FlaxPegasusModelTester(self )
UpperCAmelCase__ : Optional[Any] = ConfigTester(self ,config_class=A )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A ,A ,A )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A ,A ,A )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : List[Any] = self._prepare_for_class(A ,A )
UpperCAmelCase__ : int = model_class(A )
@jax.jit
def encode_jitted(A : Optional[int] ,A : Union[str, Any]=None ,**A : Optional[Any] ):
return model.encode(input_ids=A ,attention_mask=A )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : int = encode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : Dict = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ : Dict = model_class(A )
UpperCAmelCase__ : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase__ : Dict = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(A : List[Any] ,A : Any ,A : List[Any] ):
return model.decode(
decoder_input_ids=A ,decoder_attention_mask=A ,encoder_outputs=A ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ : str = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) ,len(A ) )
for jitted_output, output in zip(A ,A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=A )
UpperCAmelCase__ : Any = np.ones((1, 1) )
UpperCAmelCase__ : Optional[Any] = model(A )
self.assertIsNotNone(A )
@slow
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
UpperCAmelCase__ : Union[str, Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
UpperCAmelCase__ : str = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
UpperCAmelCase__ : str = tokenizer(A ,return_tensors="""np""" ,truncation=A ,max_length=512 ,padding=A )
UpperCAmelCase__ : Union[str, Any] = model.generate(**A ,num_beams=2 ).sequences
UpperCAmelCase__ : int = tokenizer.batch_decode(A ,skip_special_tokens=A )
assert tgt_text == decoded
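# A tiny numpy check of the mask construction in prepare_pegasus_inputs_dict above: the
# encoder mask flags non-pad positions, while the decoder mask always keeps position 0
# (the decoder start token) and masks later pad positions. A pad id of 0 is assumed for
# the toy arrays below.
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 6, 7, 0, 0]])
decoder_input_ids = np.array([[0, 8, 9, 0]])

attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(attention_mask.tolist())          # [[1, 1, 1, 0, 0]]
print(decoder_attention_mask.tolist())  # [[1, 1, 1, 0]]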
| 65
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area ( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ):
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        area += abs(fxa_next + fxa ) * (xa_next - xa) / 2
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area
if __name__ == "__main__":
    def f ( x ):
        '''simple docstring'''
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
    i = 10
while i <= 10_00_00:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
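# A quick sanity check of the composite trapezoid rule implemented above: for
# f(x) = x**2 on [0, 1] the exact integral is 1/3, and the error shrinks roughly like
# 1/steps**2. Self-contained numpy version, independent of the function above.
import numpy as np


def trapezoid(fnc, x_start: float, x_end: float, steps: int) -> float:
    xs = np.linspace(x_start, x_end, steps + 1)
    ys = fnc(xs)
    dx = (x_end - x_start) / steps
    return float(dx * (ys[1:] + ys[:-1]).sum() / 2)


for steps in (10, 100, 1000):
    approx = trapezoid(lambda x: x**2, 0.0, 1.0, steps)
    print(steps, approx, abs(approx - 1 / 3))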
| 704
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : Any = DebertaTokenizer
a_ : Dict = True
a_ : Optional[Any] = DebertaTokenizerFast
def _lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
_lowerCamelCase : int = dict(zip(A , range(len(A ) ) ) )
_lowerCamelCase : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowerCamelCase : List[str] = {'unk_token': '[UNK]'}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def _lowerCAmelCase ( self , **A ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , A ):
_lowerCamelCase : Union[str, Any] = 'lower newer'
_lowerCamelCase : Optional[int] = 'lower newer'
return input_text, output_text
def _lowerCAmelCase ( self ):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowerCAmelCase ( self ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello' , 'World' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'] , expected_token_type_ids )
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowerCamelCase : Dict = tokenizer.encode('sequence builders' , add_special_tokens=A )
_lowerCamelCase : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=A )
_lowerCamelCase : str = tokenizer.encode(
'sequence builders' , add_special_tokens=A , add_prefix_space=A )
_lowerCamelCase : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=A , add_prefix_space=A )
_lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(A )
_lowerCamelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : Union[str, Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_lowerCamelCase : List[Any] = tokenizer_class.from_pretrained('microsoft/deberta-base' )
_lowerCamelCase : Optional[Any] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
_lowerCamelCase : Optional[Any] = tokenizer(A , padding=A )
_lowerCamelCase : List[Any] = [tokenizer.decode(A , skip_special_tokens=A ) for seq in encoding['input_ids']]
# fmt: off
_lowerCamelCase : Optional[Any] = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_lowerCamelCase : Tuple = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , A )
for expected, decoded in zip(A , A ):
self.assertEqual(A , A )
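# The setUp above materializes a toy byte-level BPE vocabulary (vocab.json plus
# merges.txt) so a tokenizer can be constructed without downloading anything. A
# standalone sketch of that trick; the token inventory below is illustrative, not the
# actual DeBERTa vocabulary.
import json
import os
import tempfile

vocab_tokens = ["l", "o", "w", "e", "r", "\u0120", "\u0120l", "\u0120lo", "\u0120low", "er", "[UNK]"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab_tokens, range(len(vocab_tokens)))), fp)
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))
# A tokenizer class that understands this file format (the GPT-2/DeBERTa BPE family) can
# now be pointed at tmpdir via from_pretrained, together with unk_token="[UNK]".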
| 349
| 0
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowercase ( unittest.TestCase ):
def a ( self : Optional[Any] ) -> None:
__snake_case = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__snake_case = Vector()
def a ( self : List[str] ) -> None:
__snake_case = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , '(0,0,0,0,0,1)' )
def a ( self : Optional[Any] ) -> None:
__snake_case = Vector([1, 2, 3, 4] )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
def a ( self : Union[str, Any] ) -> None:
__snake_case = Vector([1, 2] )
__snake_case = Vector([1, 2, 3, 4, 5] )
__snake_case = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__snake_case = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def a ( self : int ) -> None:
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def a ( self : int ) -> None:
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def a ( self : Union[str, Any] ) -> None:
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([2, -1, 4] ) # for test of dot product
__snake_case = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def a ( self : int ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def a ( self : Any ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def a ( self : int ) -> None:
__snake_case = Vector([1, 2, 3] )
__snake_case = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , '(3,4,7)' )
def a ( self : Optional[Any] ) -> None:
__snake_case = Vector([1, 0, 0, 0, 0, 0] )
__snake_case = x.copy()
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , str(SCREAMING_SNAKE_CASE_ ) )
def a ( self : Optional[Any] ) -> None:
__snake_case = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(SCREAMING_SNAKE_CASE_ ) , '(0,1,0)' )
def a ( self : Any ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(SCREAMING_SNAKE_CASE_ ) )
def a ( self : Tuple ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def a ( self : str ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def a ( self : Optional[Any] ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def a ( self : Any ) -> None:
__snake_case = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__snake_case = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def a ( self : Union[str, Any] ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(SCREAMING_SNAKE_CASE_ ) )
def a ( self : Any ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def a ( self : Tuple ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def a ( self : int ) -> None:
__snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def a ( self : Any ) -> None:
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
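# A compact plain-Python reference for two of the vector operations exercised above:
# axpy (scalar * x + y) and the Euclidean length. Independent of the `lib` module the
# tests import; the expected values match the assertions above.
import math
from typing import List


def axpy(a: float, x: List[float], y: List[float]) -> List[float]:
    # componentwise a*x + y; the two vectors must have the same size
    assert len(x) == len(y)
    return [a * xi + yi for xi, yi in zip(x, y)]


def euclidean_length(x: List[float]) -> float:
    return math.sqrt(sum(xi * xi for xi in x))


print(axpy(2, [1, 2, 3], [1, 0, 1]))       # [3, 4, 7]
print(round(euclidean_length([1, 2]), 3))  # 2.236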
| 56
|
"""simple docstring"""
def A_ ( numerator: int = 1 , digit: int = 1000 ) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
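# Illustrative sanity check (not part of the original file): with the classic Project Euler 26
# bounds, the denominator below 1000 with the longest recurring decimal cycle is 983, so
# `A_(1, 1000)` is expected to return 983.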
| 470
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum( arr : Sequence[float] , allow_empty_subarrays : bool = False ):
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
snake_case : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 711
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences , padding_value , padding_side , sequence_length ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
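# Illustrative call (not in the original script): padding two ragged label lists on the right to
# length 4 with -1 gives
#   padding_tensor([[1, 2], [3]], -1, "right", 4) == [[1, 2, -1, -1], [3, -1, -1, -1]]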
def lowercase__ ( __UpperCamelCase : str ):
    '''simple docstring'''
    cp = ord(__UpperCamelCase )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(__UpperCamelCase )
    if cat.startswith("""P""" ):
        return True
    return False
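# Illustrative behaviour (follows directly from the checks above): lowercase__("!") and lowercase__(",")
# return True, while lowercase__("a") returns False.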
@dataclass
class lowerCamelCase__( snake_case_ ):
UpperCamelCase : PreTrainedTokenizerBase
UpperCamelCase : Union[bool, str, PaddingStrategy] = True
UpperCamelCase : Optional[int] = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : int = -100
UpperCamelCase : str = "pt"
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
import torch
__lowercase = """label""" if """label""" in features[0].keys() else """labels"""
__lowercase = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__lowercase = self.tokenizer.pad(
__UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
__lowercase = torch.tensor(batch["""entity_ids"""] ).shape[1]
__lowercase = self.tokenizer.padding_side
if padding_side == "right":
__lowercase = [
list(__UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) for label in labels
]
else:
__lowercase = [
[self.label_pad_token_id] * (sequence_length - len(__UpperCAmelCase )) + list(__UpperCAmelCase ) for label in labels
]
__lowercase = [feature["""ner_tags"""] for feature in features]
__lowercase = padding_tensor(__UpperCAmelCase , -1 , __UpperCAmelCase , __UpperCAmelCase )
__lowercase = [feature["""original_entity_spans"""] for feature in features]
__lowercase = padding_tensor(__UpperCAmelCase , (-1, -1) , __UpperCAmelCase , __UpperCAmelCase )
__lowercase = {k: torch.tensor(__UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
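# Note (descriptive, reflecting the upstream LUKE token-classification example this collator mirrors):
# the collator is meant to pad the per-entity labels to the padded `entity_ids` length, pad the ragged
# `ner_tags` / `original_entity_spans` lists with `padding_tensor`, and convert every field to int64
# tensors before returning the batch.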
| 339
| 0
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
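# Expected console output (derivable from the f-string above): "5 * 1 = 5" through "5 * 10 = 50",
# one line per term.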
| 159
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ = model(_snake_case )["last_hidden_state"]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 159
| 1
|
'''simple docstring'''
import qiskit
def single_qubit_measure( qubits , classical_bits ):
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=10_00 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
A_ : List[str] = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
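# With both qubits flipped by the X gates, the ideal (noise-free) aer_simulator should put all
# 1_000 shots in state '11', i.e. the printed counts are expected to be {'11': 1000}.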
| 715
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bert-generation'
    def __init__( self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
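# Minimal usage sketch (illustrative values, not part of the original module):
#   config = UpperCAmelCase(vocab_size=1000, hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
#   assert config.hidden_size == 256 and config.model_type == "bert-generation"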
| 384
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase (a_ :Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
lowercase :Dict = []
lowercase :Tuple = []
lowercase :Union[str, Any] = []
for rt in rc.restypes:
lowercase :List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
lowercase :int = {name: i for i, name in enumerate(a_)}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14)
restype_atomaa_to_atomaa_list.append([0] * 37)
restype_atomaa_mask_list.append([0.0] * 14)
lowercase :Optional[Any] = torch.tensor(
a_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase :Any = torch.tensor(
a_ , dtype=torch.intaa , device=protein['''aatype'''].device , )
lowercase :Union[str, Any] = torch.tensor(
a_ , dtype=torch.floataa , device=protein['''aatype'''].device , )
lowercase :Optional[Any] = protein['''aatype'''].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
lowercase :Union[str, Any] = restype_atomaa_to_atomaa[protein_aatype]
lowercase :str = restype_atomaa_mask[protein_aatype]
lowercase :Union[str, Any] = residx_atomaa_mask
lowercase :int = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
lowercase :List[Any] = restype_atomaa_to_atomaa[protein_aatype]
lowercase :Optional[Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
lowercase :Optional[int] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device)
for restype, restype_letter in enumerate(rc.restypes):
lowercase :Dict = rc.restype_atoa[restype_letter]
lowercase :Any = rc.residue_atoms[restype_name]
for atom_name in atom_names:
lowercase :Dict = rc.atom_order[atom_name]
lowercase :Union[str, Any] = 1
lowercase :Tuple = restype_atomaa_mask[protein_aatype]
lowercase :Tuple = residx_atomaa_mask
return protein
def lowerCamelCase (a_ :Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
lowercase :str = tree_map(lambda a_: torch.tensor(a_ , device=batch['''aatype'''].device) , a_ , np.ndarray)
lowercase :int = tensor_tree_map(lambda a_: np.array(a_) , make_atomaa_masks(a_))
return out
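# Note (descriptive, following the upstream OpenFold/AlphaFold data transforms these helpers mirror):
# the masks built above are meant to map between the compact 14-atom and the full 37-atom residue
# representations, keyed by residue type, so downstream code can gather coordinates in either layout.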
| 677
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
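# Note: with the lazy module above, importing this package only resolves the framework-specific
# submodules (torch / tf / flax) when the corresponding backend is actually installed; the
# TYPE_CHECKING branch exists purely so static type checkers and IDEs see the real symbols.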
| 677
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
A__ : str = MvpTokenizer
A__ : Union[str, Any] = MvpTokenizerFast
A__ : int = True
A__ : Any = filter_roberta_detectors
def __A ( self ) -> List[Any]:
super().setUp()
_UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_UpperCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_UpperCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCAmelCase = {"unk_token": "<unk>"}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def __A ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def __A ( self , **snake_case_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def __A ( self , snake_case_ ) -> Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def __A ( self ) -> int:
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def __A ( self ) -> Union[str, Any]:
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def __A ( self ) -> str:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
# Test that special tokens are reset
@require_torch
def __A ( self ) -> Tuple:
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , snake_case_ )
self.assertIn("attention_mask" , snake_case_ )
self.assertNotIn("labels" , snake_case_ )
self.assertNotIn("decoder_attention_mask" , snake_case_ )
@require_torch
def __A ( self ) -> Tuple:
_UpperCAmelCase = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(text_target=snake_case_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def __A ( self ) -> Dict:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=snake_case_ , truncation=snake_case_ , return_tensors="pt" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def __A ( self ) -> List[str]:
_UpperCAmelCase = ["A long paragraph for summarization."]
_UpperCAmelCase = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase = tokenizer(snake_case_ , text_target=snake_case_ , return_tensors="pt" )
_UpperCAmelCase = inputs["input_ids"]
_UpperCAmelCase = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __A ( self ) -> Optional[Any]:
pass
def __A ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
_UpperCAmelCase = "A, <mask> AllenNLP sentence."
_UpperCAmelCase = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
_UpperCAmelCase = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
snake_case_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 716
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE_ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
_UpperCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase = self.diffusers_dir
shutil.copy(
os.path.join(snake_case_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __A ( self ) -> List[str]:
_UpperCAmelCase = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __A ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ) -> Optional[int]:
_UpperCAmelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCAmelCase = black.format_str(snake_case_ , mode=snake_case_ )
_UpperCAmelCase = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case_ , "w" , newline="\n" ) as f:
f.write(snake_case_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case_ )
with open(snake_case_ , "r" ) as f:
self.assertTrue(f.read() , snake_case_ )
def __A ( self ) -> int:
_UpperCAmelCase = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case_ , snake_case_ )
def __A ( self ) -> List[str]:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case_ ) , )
# Copy consistency with a really long name
_UpperCAmelCase = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case_ , snake_case_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case_ , overwrite_result=re.sub("DDPM" , "Test" , snake_case_ ) , )
| 579
| 0
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ):
model.train()
_SCREAMING_SNAKE_CASE : List[Any] = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = F.mse_loss(__lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase=False ):
set_seed(42 )
_SCREAMING_SNAKE_CASE : str = RegressionModel()
_SCREAMING_SNAKE_CASE : List[str] = deepcopy(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = RegressionDataset(length=80 )
_SCREAMING_SNAKE_CASE : List[str] = DataLoader(__lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
_SCREAMING_SNAKE_CASE : Optional[Any] = AdamW(params=model.parameters(), lr=1e-3 )
_SCREAMING_SNAKE_CASE : str = AdamW(params=ddp_model.parameters(), lr=1e-3 )
_SCREAMING_SNAKE_CASE : List[Any] = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 )
_SCREAMING_SNAKE_CASE : Tuple = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 )
# Make a copy of `model`
if sched:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = accelerator.prepare(__lowerCamelCase, __lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCamelCase__ (__lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = get_training_setup(__lowerCamelCase )
# Use a single batch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = accelerator.gather((ddp_input, ddp_target) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def lowerCamelCase__ (__lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = get_training_setup(__lowerCamelCase )
# Use a single batch
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather((ddp_input, ddp_target) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_SCREAMING_SNAKE_CASE : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def lowerCamelCase__ (__lowerCamelCase=False, __lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : str = Accelerator(
split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = get_training_setup(__lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = accelerator.gather((ddp_input, ddp_target) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_SCREAMING_SNAKE_CASE : Optional[Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
GradientState._reset_state()
def lowerCamelCase__ (__lowerCamelCase=False, __lowerCamelCase=False ):
_SCREAMING_SNAKE_CASE : str = Accelerator(
split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = get_training_setup(__lowerCamelCase, __lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = batch.values()
# Gather the distributed inputs and targs for the base model
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = accelerator.gather((ddp_input, ddp_target) )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
_SCREAMING_SNAKE_CASE : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : str = Accelerator()
_SCREAMING_SNAKE_CASE : int = RegressionDataset(length=80 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(__lowerCamelCase, batch_size=16 )
_SCREAMING_SNAKE_CASE : int = RegressionDataset(length=96 )
_SCREAMING_SNAKE_CASE : List[str] = DataLoader(__lowerCamelCase, batch_size=16 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = accelerator.prepare(__lowerCamelCase, __lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if iteration < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if batch_num < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCamelCase__ ():
_SCREAMING_SNAKE_CASE : Tuple = Accelerator()
_SCREAMING_SNAKE_CASE : List[str] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation(__lowerCamelCase, __lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase, __lowerCamelCase )
def lowerCamelCase__ (__lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
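# This module is an `accelerate` test script: `main()` is expected to run once per process,
# e.g. via `accelerate launch` (or the TPU `xla_spawn` path hinted at above), rather than be imported.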
| 249
|
from __future__ import annotations
def longest_subsequence(array):  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
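# Illustrative example (traced against the implementation above, not an original doctest):
# longest_subsequence([1, 3, 2, 4]) returns [1, 2, 4], one longest increasing subsequence of the input.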
| 249
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = '''▁'''
_lowerCamelCase : str = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
_lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
_lowerCamelCase : Any = {'''vinai/bartpho-syllable''': 1024}
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase__ , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
__A =AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
__A ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
__A =vocab_file
__A =monolingual_vocab_file
__A =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__A ={}
__A =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowercase__ ) not in self.fairseq_tokens_to_ids:
__A =cnt
cnt += 1
with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
__A =line.strip().split()[0]
__A =len(self.fairseq_tokens_to_ids )
if str(lowercase__ ) not in self.fairseq_tokens_to_ids:
__A =len(self.fairseq_tokens_to_ids )
__A ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
__A =self.__dict__.copy()
__A =None
__A =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase__ ):
'''simple docstring'''
__A =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__A ={}
__A =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A =[self.cls_token_id]
__A =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1]
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None ):
'''simple docstring'''
__A =[self.sep_token_id]
__A =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A ={self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
__A =''''''.join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip()
return out_string
def __UpperCamelCase ( self , lowercase__ , lowercase__ = None ):
'''simple docstring'''
if not os.path.isdir(lowercase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__A =os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__A =os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , '''wb''' ) as fi:
__A =self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowercase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowercase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowercase__ )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
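# Note (descriptive): this tokenizer keeps two vocabularies, the full SentencePiece model and the
# reduced monolingual dict.txt; pieces are looked up in `fairseq_tokens_to_ids`, and anything not in
# the reduced vocabulary falls back to the <unk> id.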
| 516
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_lowerCamelCase : str = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
_lowerCamelCase : int = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_lowerCamelCase : Union[str, Any] = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_lowerCamelCase : Union[str, Any] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_lowerCamelCase : List[Any] = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_lowerCamelCase : int = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def A__ ( __A : List[str] ) ->List[str]:
__A =re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __A )
return [m.group(0 ) for m in matches]
def A__ ( ) ->Tuple:
__A =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__A ={
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__A =collections.defaultdict(__A )
__A =collections.defaultdict(__A )
__A =collections.defaultdict(__A )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__A ):
__A =None
if _re_tf_models.match(__A ) is not None:
__A =tf_models
__A =_re_tf_models.match(__A ).groups()[0]
elif _re_flax_models.match(__A ) is not None:
__A =flax_models
__A =_re_flax_models.match(__A ).groups()[0]
elif _re_pt_models.match(__A ) is not None:
__A =pt_models
__A =_re_pt_models.match(__A ).groups()[0]
if lookup_dict is not None:
while len(__A ) > 0:
if attr_name in model_prefix_to_model_type:
__A =True
break
# Try again after removing the last word in the name
__A =''''''.join(camel_case_split(__A )[:-1] )
__A =set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__A =list(__A )
all_models.sort()
__A ={'''model_type''': all_models}
__A =[pt_models[t] for t in all_models]
__A =[tf_models[t] for t in all_models]
__A =[flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__A ={}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__A ='''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__A ='''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__A ='''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__A ='''AutoTokenizer'''
__A =[processors[t] for t in all_models]
return pd.DataFrame(__A )
def A__ ( __A : List[Any] ) ->Union[str, Any]:
__A =[
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__A =[model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
__A =[auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(__A , __A , __A ):
# The type of pipeline may not exist in this framework
if not hasattr(__A , __A ):
continue
# First extract all model_names
__A =[]
for name in getattr(__A , __A ).values():
if isinstance(__A , __A ):
model_names.append(__A )
else:
model_names.extend(list(__A ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A__ ( __A : int , __A : Optional[Any] ) ->Dict:
__A =get_frameworks_table()
__A =Dataset.from_pandas(__A )
__A =hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=__A )
__A =Dataset.from_json(__A )
__A ={
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(__A ) )
}
__A =update_pipeline_and_auto_class_table(__A )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__A =sorted(table.keys() )
__A =pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__A =Dataset.from_pandas(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__A , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(__A , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__A =(
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
__A ='''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=__A , repo_type='''dataset''' , token=__A , commit_message=__A , )
def A__ ( ) ->str:
__A ={tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__A =transformers_module.pipelines.SUPPORTED_TASKS
__A =[]
for key in pipeline_tasks:
if key not in in_table:
__A =pipeline_tasks[key]['''pt''']
if isinstance(__A , (list, tuple) ):
__A =model[0]
__A =model.__name__
if model not in in_table.values():
missing.append(__A )
if len(__A ) > 0:
__A =''', '''.join(__A )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
_lowerCamelCase : Optional[Any] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
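# Typical invocations (from the repository root, per the header comment at the top of this script):
#   python utils/update_metadata.py --token <hub token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only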
| 516
| 1
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ ) -> Optional[int]:
SCREAMING_SNAKE_CASE = metric_id
class lowerCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = [MetricMock(lowerCamelCase_ ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __A ( self ) -> List[str]:
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]:
if "tmp_path" in args:
SCREAMING_SNAKE_CASE = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(SCREAMING_SNAKE_CASE_ , match='https://huggingface.co/docs/evaluate' ):
func(*SCREAMING_SNAKE_CASE_ )
| 247
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = CLIPConfig
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""CLIPEncoderLayer"""]
def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = CLIPVisionModelWithProjection(config.vision_config )
SCREAMING_SNAKE_CASE = nn.Linear(config.vision_config.projection_dim , 1 )
SCREAMING_SNAKE_CASE = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=0.5 , lowerCAmelCase__=0.5 ) -> List[str]:
SCREAMING_SNAKE_CASE = self.vision_model(lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = self.p_head(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = nsfw_detected.flatten()
SCREAMING_SNAKE_CASE = nsfw_detected > p_threshold
SCREAMING_SNAKE_CASE = nsfw_detected.tolist()
if any(lowerCAmelCase__ ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(lowerCAmelCase__ ):
if nsfw_detected_:
SCREAMING_SNAKE_CASE = np.zeros(images[idx].shape )
SCREAMING_SNAKE_CASE = self.w_head(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = watermark_detected.flatten()
SCREAMING_SNAKE_CASE = watermark_detected > w_threshold
SCREAMING_SNAKE_CASE = watermark_detected.tolist()
if any(lowerCAmelCase__ ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(lowerCAmelCase__ ):
if watermark_detected_:
SCREAMING_SNAKE_CASE = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 247
| 1
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class snake_case ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE_ = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 )
SCREAMING_SNAKE_CASE_ = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
for example in examples:
SCREAMING_SNAKE_CASE_ = video_classifier(_A )
self.assertEqual(
_A , [
{'''score''': ANY(_A ), '''label''': ANY(_A )},
{'''score''': ANY(_A ), '''label''': ANY(_A )},
] , )
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
SCREAMING_SNAKE_CASE_ = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
SCREAMING_SNAKE_CASE_ = pipeline(
'''video-classification''' , model=_A , feature_extractor=_A , frame_sampling_rate=4 )
SCREAMING_SNAKE_CASE_ = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE_ = video_classifier(_A , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}] , )
SCREAMING_SNAKE_CASE_ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
[{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}],
[{'''score''': 0.51_99, '''label''': '''LABEL_0'''}, {'''score''': 0.48_01, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def _lowercase (self ):
"""simple docstring"""
pass
| 705
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index
| 628
| 0
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__lowerCAmelCase = [
'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
__lowerCAmelCase = [
'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .'
' Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _UpperCAmelCase ( ):
a_ : List[Any] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , bootstrap_aggregation=lowerCAmelCase_ , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : Optional[int] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , bootstrap_aggregation=lowerCAmelCase_ , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def _UpperCAmelCase ( ):
a_ : Optional[Any] = '''rougeLsum'''
a_ : Optional[Any] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ , rouge_keys=[k] )[k]
a_ : Optional[Any] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _UpperCAmelCase ( ):
a_ : int = ['''rouge1''', '''rouge2''', '''rougeL''']
a_ : List[str] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ , rouge_keys=lowerCAmelCase_ )
a_ : Union[str, Any] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ , rouge_keys=lowerCAmelCase_ )
assert score_sep == score_no_sep
def _UpperCAmelCase ( ):
a_ : int = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .''',
]
a_ : Dict = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ ) == calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , newline_sep=lowerCAmelCase_ )
def _UpperCAmelCase ( ):
a_ : Dict = [
'''\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" '''
]
a_ : str = [
''' Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
a_ : List[Any] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , rouge_keys=['''rougeLsum'''] , newline_sep=lowerCAmelCase_ )['''rougeLsum''']
a_ : List[str] = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def _UpperCAmelCase ( ):
a_ : Any = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
a_ : List[Any] = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
a_ : str = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
| 466
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__a = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
__a = 10
__a = 256
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[MinHash]:
if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS:
return None
UpperCAmelCase = MinHash(num_perm=lowerCAmelCase_ )
for token in set(lowerCAmelCase_ ):
min_hash.update(token.encode() )
return min_hash
def _UpperCamelCase ( lowerCAmelCase_ ) ->Set[str]:
return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0}
class __lowercase :
def __init__( self : List[str] , *,
__lowerCamelCase : float = 0.85 , ) -> Any:
"""simple docstring"""
UpperCAmelCase = duplication_jaccard_threshold
UpperCAmelCase = NUM_PERM
UpperCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
UpperCAmelCase = defaultdict(__lowerCamelCase )
def _lowercase ( self : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : MinHash ) -> None:
"""simple docstring"""
UpperCAmelCase = self._index.query(__lowerCamelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(__lowerCamelCase , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__lowerCamelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__lowerCamelCase )
def _lowercase ( self : Union[str, Any] ) -> List[List[Dict]]:
"""simple docstring"""
UpperCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
UpperCAmelCase = [base] + list(__lowerCamelCase )
# reformat the cluster to be a list of dict
UpperCAmelCase = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(__lowerCamelCase )
return duplicate_clusters
def _lowercase ( self : Tuple , __lowerCamelCase : Optional[int] ) -> None:
"""simple docstring"""
UpperCAmelCase = self.get_duplicate_clusters()
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def _UpperCamelCase ( lowerCAmelCase_ ) ->Tuple:
UpperCAmelCase , UpperCAmelCase = element
UpperCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Any:
UpperCAmelCase = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=1_0_0 ) ):
di.add(lowerCAmelCase_ , lowerCAmelCase_ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->float:
UpperCAmelCase = get_tokens(lowerCAmelCase_ )
UpperCAmelCase = get_tokens(lowerCAmelCase_ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__a = None
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = []
for elementa in cluster:
UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
UpperCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
UpperCAmelCase = 1
extremes.append(lowerCAmelCase_ )
return extremes
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Optional[int]:
global _shared_dataset
UpperCAmelCase = dataset
UpperCAmelCase = []
UpperCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ):
extremes_list.append(lowerCAmelCase_ )
return extremes_list
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
UpperCAmelCase = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
UpperCAmelCase = {}
UpperCAmelCase = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for extremes in extremes_clusters:
for element in extremes:
UpperCAmelCase = element
UpperCAmelCase = duplicate_indices - set(extreme_dict.keys() )
UpperCAmelCase = dataset.filter(lambda lowerCAmelCase_ , lowerCAmelCase_ : idx not in remove_indices , with_indices=lowerCAmelCase_ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
UpperCAmelCase = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
UpperCAmelCase = extreme_dict[element["""base_index"""]]["""copies"""]
print(F"""Original dataset size: {len(lowerCAmelCase_ )}""" )
print(F"""Number of duplicate clusters: {len(lowerCAmelCase_ )}""" )
print(F"""Files in duplicate cluster: {len(lowerCAmelCase_ )}""" )
print(F"""Unique files in duplicate cluster: {len(lowerCAmelCase_ )}""" )
print(F"""Filtered dataset size: {len(lowerCAmelCase_ )}""" )
return ds_filter, duplicate_clusters
| 377
| 0
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _snake_case ( a__ ):
def __init__( self : List[Any] , UpperCAmelCase : pyspark.sql.DataFrame , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : bool = True , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : str = None , UpperCAmelCase : bool = True , UpperCAmelCase : str = "arrow" , **UpperCAmelCase : List[Any] , ):
super().__init__(
split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : List[Any] = load_from_cache_file
__lowerCamelCase : Union[str, Any] = file_format
__lowerCamelCase : List[Any] = Spark(
df=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , working_dir=UpperCAmelCase , **UpperCAmelCase , )
def lowerCamelCase__ ( self : str ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__lowerCamelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 720
|
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( _lowerCamelCase: list[list[int]] ) -> bool:
'''simple docstring'''
__lowerCamelCase : str = len(_lowerCamelCase )
# We need to create solution object to save path.
__lowerCamelCase : List[Any] = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
__lowerCamelCase : Dict = run_maze(_lowerCamelCase , 0 , 0 , _lowerCamelCase )
if solved:
print("\n".join(str(_lowerCamelCase ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def lowercase_ ( _lowerCamelCase: list[list[int]] , _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: list[list[int]] ) -> bool:
'''simple docstring'''
__lowerCamelCase : Dict = len(_lowerCamelCase )
# Final check point.
if i == j == (size - 1):
__lowerCamelCase : Optional[Any] = 1
return True
__lowerCamelCase : int = (not i < 0) and (not j < 0) # Check lower bounds
__lowerCamelCase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__lowerCamelCase : List[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__lowerCamelCase : Dict = 1
# check for directions
if (
run_maze(_lowerCamelCase , i + 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j + 1 , _lowerCamelCase )
or run_maze(_lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j - 1 , _lowerCamelCase )
):
return True
__lowerCamelCase : List[str] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
| 0
|
from math import factorial, pi
def a ( a , a = 30 ) ->float:
'''simple docstring'''
if not isinstance(a__ , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(a__ , a__ ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
SCREAMING_SNAKE_CASE = float(a__ )
SCREAMING_SNAKE_CASE = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(a__ ) )
def a ( a , a = 30 ) ->float:
'''simple docstring'''
if not isinstance(a__ , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(a__ , a__ ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
SCREAMING_SNAKE_CASE = float(a__ )
SCREAMING_SNAKE_CASE = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(a__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 201
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger()
@dataclass
class __A:
snake_case_ = 42
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = len(list(m.modules() ) ) == 1 or isinstance(_snake_case , nn.Convad ) or isinstance(_snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_snake_case )
def __call__( self , _snake_case ) -> Any:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_snake_case )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Any:
'''simple docstring'''
return list(filter(lambda _snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0
snake_case_ = field(default_factory=a )
snake_case_ = field(default_factory=a )
def __call__( self , _snake_case ) -> Dict:
'''simple docstring'''
__a = Tracker(self.dest )(_snake_case ).parametrized
__a = Tracker(self.src )(_snake_case ).parametrized
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.src_skip , _snake_case ) )
__a = list(filter(lambda _snake_case : type(_snake_case ) not in self.dest_skip , _snake_case ) )
if len(_snake_case ) != len(_snake_case ):
raise Exception(
F"""Numbers of operations are different. Source module has {len(_snake_case )} operations while"""
F""" destination module has {len(_snake_case )}.""" )
for dest_m, src_m in zip(_snake_case , _snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transfered from={src_m} to={dest_m}""" )
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = True ) -> str:
print(F"""Converting {name}...""" )
with torch.no_grad():
__a = timm.create_model(a__ , pretrained=a__ ).eval()
__a = ResNetForImageClassification(a__ ).eval()
__a = ModuleTransfer(src=a__ , dest=a__ )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(a__ )
assert torch.allclose(from_model(a__ ) , our_model(a__ ).logits ), "The model logits don't match the original one."
__a = F"""resnet{'-'.join(name.split('resnet' ) )}"""
print(a__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=a__ , )
# we can use the convnext one
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=a__ , )
print(F"""Pushed {checkpoint_name}""" )
def __lowerCAmelCase ( a__ , a__ = None , a__ = True ) -> List[Any]:
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = (1, num_labels)
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(a__ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(a__ , num_labels=a__ , idalabel=a__ , labelaid=a__ )
__a = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(a__ , names_to_config[model_name] , a__ , a__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(a__ , a__ , a__ , a__ )
return config, expected_shape
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
A : List[Any] = parser.parse_args()
A : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 219
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Any = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ : Any = '''▁'''
UpperCamelCase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_A : Dict = BertGenerationTokenizer
_A : List[Any] = False
_A : Union[str, Any] = True
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : Dict = BertGenerationTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = """<s>"""
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase__ ) , 1_0_0_2 )
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def UpperCamelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertGenerationTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = """Hello World!"""
__SCREAMING_SNAKE_CASE : Dict = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__SCREAMING_SNAKE_CASE : Optional[int] = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__ ) )
@require_torch
@slow
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__SCREAMING_SNAKE_CASE : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
__SCREAMING_SNAKE_CASE : int = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.big_tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = BertGenerationConfig()
__SCREAMING_SNAKE_CASE : Any = BertGenerationEncoder(lowerCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase__ )
model(**lowerCAmelCase__ )
@slow
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 178
| 0
|