| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution, batch normalization and activation: the basic RegNet building block."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation layer (SE) proposed in https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: three convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
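
# Minimal usage sketch (added for orientation; assumes the "facebook/regnet-y-040" TF
# weights are downloadable and a local image "cat.jpg" exists):
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetModel

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=Image.open("cat.jpg"), return_tensors="tf")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, 1088, 7, 7), cf. _EXPECTED_OUTPUT_SHAPE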
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Assign a depth map to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
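
# Usage sketch (added; the "depth-estimation" task alias is real, but the checkpoint and
# the local file below are assumptions/placeholders):
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("photo.jpg")
result["depth"].save("depth_map.png")   # PIL image, values scaled to 0-255
print(result["predicted_depth"].shape)  # raw depth tensor from the model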
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """
    Solve the fractional knapsack problem greedily by value/weight ratio.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
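
# Worked example of the greedy argument (added; mirrors the doctest above): with
# value/weight ratios 6.0, 5.0 and 4.0, a capacity of 50 takes the first two items
# whole (weight 30, value 160) plus 20/30 of the third (value 80), giving 240.0.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0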
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c, calculate the roots of any
    quadratic equation of the form ax^2 + bx + c = 0.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
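
# Hand check (added): 5x^2 + 6x + 1 has discriminant 36 - 20 = 16, so the roots are
# (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0, matching the printed solutions.
print(quadratic_roots(a=5, b=6, c=1))  # (-0.2, -1.0)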
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


# Deobfuscated class name: these defaults (shortest_edge 224, 224x224 crop, ImageNet
# mean/std, make_batched) match the VideoMAE video image processor.
class VideoMAEImageProcessor(BaseImageProcessor):
    """Constructs a video image processor that resizes, center-crops, rescales and normalizes each frame."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
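
# Usage sketch (added; the class name above is a deobfuscation guess, and the frame
# shapes and the output shape below are assumptions based on the 224x224 defaults):
import numpy as np

processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]  # 16 HWC frames
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected (1, 16, 3, 224, 224)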
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings


class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states


class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states


class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs


class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)


class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value


POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )


class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
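
# Minimal usage sketch (added; assumes the "sail/poolformer_s12" checkpoint and a local
# image "cat.jpg"):
import torch
from PIL import Image
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
with torch.no_grad():
    logits = model(**processor(images=Image.open("cat.jpg"), return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"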
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """Factory used by argparse to build a ConvertCommand from the parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """Register this command to argparse so it's available for the datasets-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    matched_expressions = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(matched_expressions) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
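
# The subcommand registered above is driven from the datasets CLI; a sketch with
# placeholder paths:
#
#     datasets-cli convert --tfds_path <path/to/tfds/dataset_or_file> --datasets_directory <path/to/output>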
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the bead ("gravity") sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
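
# Complexity note and one more check (added): each outer pass lets heavier "beads" fall
# one rod to the right, so len(sequence) passes always suffice and the sort is O(n^2).
assert bead_sort([6, 11, 12, 4, 1, 5]) == [1, 4, 5, 6, 11, 12]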
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=400 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 / 255 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowercase : Optional[int] = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowercase : Tuple = parent
lowercase : int = batch_size
lowercase : List[str] = num_channels
lowercase : Optional[int] = min_resolution
lowercase : int = max_resolution
lowercase : List[str] = do_resize
lowercase : Any = size
lowercase : Any = do_rescale
lowercase : Optional[Any] = rescale_factor
lowercase : Optional[Any] = do_normalize
lowercase : Tuple = image_mean
lowercase : Any = image_std
lowercase : Tuple = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
if not batched:
lowercase : Dict = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ):
lowercase , lowercase : str = image.size
else:
lowercase , lowercase : int = image.shape[1], image.shape[2]
if w < h:
lowercase : List[str] = int(self.size['''shortest_edge'''] * h / w )
lowercase : List[str] = self.size['''shortest_edge''']
elif w > h:
lowercase : Dict = self.size['''shortest_edge''']
lowercase : List[str] = int(self.size['''shortest_edge'''] * w / h )
else:
lowercase : Any = self.size['''shortest_edge''']
lowercase : Tuple = self.size['''shortest_edge''']
else:
lowercase : List[Any] = []
for image in image_inputs:
lowercase , lowercase : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase : List[Any] = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0]
lowercase : Dict = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making: returns the coins used, largest denomination first."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations (assumed sorted in ascending order)
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
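
# Worked example (comment-only sketch): with the default denominations above,
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") returns
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. The greedy choice is optimal
# here because Indian currency is a canonical coin system; note the function
# assumes `denominations` is sorted in ascending order, since it walks them
# with reversed().

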
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Generic feature extractor for sequence inputs: stores feature_size, sampling_rate and padding_value, and implements padding/truncation over batches."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
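

# Standalone sketch of the right-padding branch of `_pad` above (assumes a 1-D
# mono input, i.e. feature_size == 1, padding_value == 0.0, max_length == 5):
#   raw        = np.array([0.1, 0.2, 0.3], dtype=np.float32)
#   difference = 5 - len(raw)  # == 2
#   np.pad(raw, (0, difference), "constant", constant_values=0.0)
#       -> [0.1, 0.2, 0.3, 0.0, 0.0]
#   np.pad(np.ones(len(raw), dtype=np.int32), (0, difference))
#       -> [1, 1, 1, 0, 0]   # the matching attention_mask

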
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
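

# Minimal usage sketch of the helpers exercised above (network access assumed;
# the repo ids are the same test fixtures used in the class):
#   cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)  # -> local snapshot path
#   has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)   # -> True
#   get_file_from_repo("bert-base-cased", "ahah.txt")                 # -> None (missing file)

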
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
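

# A minimal sketch of the dynamic axes the ONNX exporter receives from the
# `inputs` property above (assumes the default task, so no "choice" axis):
#   CamembertOnnxConfig(CamembertConfig(), task="default").inputs
#       -> OrderedDict([("input_ids",      {0: "batch", 1: "sequence"}),
#                       ("attention_mask", {0: "batch", 1: "sequence"})])

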
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
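

# Usage sketch for the two helpers above (shapes and vocab size are
# illustrative; only runs when this test file is executed directly with flax):
if __name__ == "__main__" and is_flax_available():
    dummy_ids = ids_tensor((2, 4), vocab_size=10)  # int32 values in [0, 10), shape (2, 4)
    dummy_mask = random_attention_mask((2, 4))     # 0/1 mask with the last column forced to 1
    print(dummy_ids.shape, dummy_mask[:, -1])      # (2, 4) [1 1]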
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)

from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
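
# Caveat sketch (not from the original module): "Ask HN" style items carry no
# "url" field, so format(**story) above raises KeyError for them; a defensive
# variant would be:
#   "* [{}]({})".format(story["title"], story.get("url", ""))

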
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7 * sqrt(5)) / 4 * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
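
    # Worked check of the closed-form expressions above (the expected values
    # are the standard constants for a unit regular dodecahedron):
    import math

    assert math.isclose(dodecahedron_surface_area(1), 20.645728807, rel_tol=1e-9)
    assert math.isclose(dodecahedron_volume(1), 7.663118961, rel_tol=1e-9)

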
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()

results = {}
# fmt: off
__A : Tuple = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__A : Optional[Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__A : int = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__A : int = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__A : List[str] = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__A : Optional[int] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__A : Any = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__A : Dict = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__A : Any = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__A : int = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__A : Optional[int] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__A : Any = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__A : Dict = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__A : Optional[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__A : Dict = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")

from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])


"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
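

# Layout sketch for the two sequence-pair helpers above:
#   build_inputs_with_special_tokens:      [CLS] A [SEP]  /  [CLS] A [SEP] B [SEP]
#   create_token_type_ids_from_sequences:  0s over "[CLS] A [SEP]", 1s over "B [SEP]"

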
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the image-embedding mean and std used to normalize embeddings before noising and to un-normalize them afterwards."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
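

# Round-trip sketch of `scale`/`unscale` above (with freshly initialized
# parameters mean == 0 and std == 1, both transforms are the identity):
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds)  # True

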
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
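
# Usage sketch (shell; the exact subcommand names come from the parsers
# registered above, so treat the spellings as assumptions):
#   accelerate config            # interactive configuration questionnaire
#   accelerate env               # print environment info for bug reports
#   accelerate launch train.py   # run a script with the saved config
#   accelerate test              # sanity-check the saved config

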
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A : Tuple = "sshleifer/mar_enro_6_3_student"
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __lowercase ( self : List[Any] ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ : Any = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase , )
lowerCAmelCase_ : Optional[Any] = F'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def __lowercase ( self : str ) -> str:
MarianMTModel.from_pretrained(lowerCamelCase )
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))

        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
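# Illustrative output for a tree containing `sorts/bubble_sort.py` and
# `sorts/quick_sort.py` (hypothetical files, shown only to clarify the format):
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Quick Sort](sorts/quick_sort.py)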
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int]= False
_a : int= False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
a : Dict = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , A , )
super().__init__(*A , **A )
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
UpperCAmelCase : str = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
UpperCAmelCase : Dict = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
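# Illustrative vocab.txt lines this loader accepts (hypothetical content): a line
# is either a single surface form, or several comma-separated surface forms that
# all map to the same token id, e.g.
#   こんにちは
#   ハロー,hello
# The first form of each line is what ids_to_tokens[idx][0] yields at decode time.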
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
@property
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return len(self.raw_vocab )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str ):
'''simple docstring'''
return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = """""".join(__UpperCAmelCase ).strip()
return out_string
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
__UpperCAmelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids
def lowerCamelCase__ ( self : str , UpperCamelCase : Dict , UpperCamelCase : List[str] = None ):
'''simple docstring'''
__UpperCAmelCase : Dict = 0
if os.path.isdir(__UpperCAmelCase ):
__UpperCAmelCase : int = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__UpperCAmelCase : List[str] = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__UpperCAmelCase : int = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__UpperCAmelCase : List[str] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__UpperCAmelCase : Optional[int] = token_index
writer.write(""",""".join(__UpperCAmelCase ) + """\n""" )
index += 1
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , __UpperCAmelCase )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=False ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = text.replace(""" """ , """<SP>""" )
__UpperCAmelCase : Dict = text.replace(""" """ , """<SP>""" )
__UpperCAmelCase : Optional[Any] = text.replace("""\r\n""" , """<BR>""" )
__UpperCAmelCase : List[str] = text.replace("""\n""" , """<BR>""" )
__UpperCAmelCase : Optional[int] = text.replace("""\r""" , """<BR>""" )
__UpperCAmelCase : Optional[int] = text.replace("""\t""" , """<TAB>""" )
__UpperCAmelCase : Tuple = text.replace("""—""" , """ー""" )
__UpperCAmelCase : Optional[Any] = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__UpperCAmelCase : List[str] = text.replace(__UpperCAmelCase , __UpperCAmelCase )
if clean:
__UpperCAmelCase : Optional[Any] = self.clean_text(__UpperCAmelCase )
def check_simbol(UpperCamelCase : Optional[Any] ):
__UpperCAmelCase : Any = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2:
__UpperCAmelCase : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2_A1 and c <= 0xC2_BF)
or (c >= 0xC7_80 and c <= 0xC7_83)
or (c >= 0xCA_B9 and c <= 0xCB_BF)
or (c >= 0xCC_80 and c <= 0xCD_A2)
):
return True
return False
def checkuae(UpperCamelCase : int ):
__UpperCAmelCase : List[Any] = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3:
__UpperCAmelCase : Union[str, Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_80_80 and c <= 0xE2_B0_7F:
return True
return False
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : List[Any] = []
while pos < len(__UpperCAmelCase ):
__UpperCAmelCase : Dict = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__UpperCAmelCase : Union[str, Any] = [] # (token_id, token, pos)
for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
__UpperCAmelCase : Dict = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCAmelCase ) > 2:
__UpperCAmelCase : Any = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCAmelCase ) > 0:
# the smallest token_id is adopted
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = sorted(__UpperCAmelCase , key=lambda UpperCamelCase : x[0] )[0]
result.append(__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = e
else:
__UpperCAmelCase : Optional[int] = pos + 1
__UpperCAmelCase : Optional[int] = text[pos:end]
if check_simbol(__UpperCAmelCase ):
result.append("""<KIGOU>""" )
elif checkuae(__UpperCAmelCase ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__UpperCAmelCase : Optional[int] = end
return result
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str="\n" ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : List[str] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
__UpperCAmelCase : Optional[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(__UpperCAmelCase )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("""utf-8""" , errors="""replace""" ) )
__UpperCAmelCase : Union[str, Any] = """""".join(__UpperCAmelCase )
return text
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
lowercase_ = {
"google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Any,lowercase_ : Optional[int]=None,lowercase_ : Union[str, Any]=None,lowercase_ : Union[str, Any]="<pad>",lowercase_ : List[str]="</s>",lowercase_ : str="<unk>",lowercase_ : Union[str, Any]="<mask_2>",lowercase_ : int="<mask_1>",lowercase_ : Any=None,lowercase_ : Optional[int]=1_0_3,**lowercase_ : Tuple,)-> str:
'''simple docstring'''
A__ = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_,lowercase_ ):
raise TypeError(
F'additional_special_tokens should be of type {type(lowercase_ )}, but is'
F' {type(lowercase_ )}' )
A__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(lowercase_ ),self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
A__ = additional_special_tokens_extended
else:
A__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2,self.offset )]
super().__init__(
lowercase_,tokenizer_file=lowercase_,pad_token=lowercase_,eos_token=lowercase_,unk_token=lowercase_,mask_token=lowercase_,mask_token_sent=lowercase_,offset=lowercase_,additional_special_tokens=lowercase_,**lowercase_,)
A__ = vocab_file
A__ = False if not self.vocab_file else True
def snake_case__ ( self : str,lowercase_ : List[str] )-> Optional[Any]:
'''simple docstring'''
A__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def snake_case__ ( self : Tuple,lowercase_ : List,lowercase_ : Optional[List] = None,lowercase_ : bool = False )-> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any]=None )-> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Dict,lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file,lowercase_ )
return (out_vocab_file,)
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
A__ = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,model.state_dict() )
A__ = os.path.join(lowercase_,'index.json' )
self.assertTrue(os.path.isfile(lowercase_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A__ = os.path.join(lowercase_,F'{key}.dat' )
self.assertTrue(os.path.isfile(lowercase_ ) )
# TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
A__ = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A__ = torch.randn(2,3,dtype=lowercase_ )
with TemporaryDirectory() as tmp_dir:
A__ = offload_weight(lowercase_,'weight',lowercase_,{} )
A__ = os.path.join(lowercase_,'weight.dat' )
self.assertTrue(os.path.isfile(lowercase_ ) )
self.assertDictEqual(lowercase_,{'weight': {'shape': [2, 3], 'dtype': str(lowercase_ ).split('.' )[1]}} )
A__ = load_offloaded_weight(lowercase_,index['weight'] )
self.assertTrue(torch.equal(lowercase_,lowercase_ ) )
    def test_offload_weights_loader(self):
A__ = ModelForTest()
A__ = model.state_dict()
A__ = {k: v for k, v in state_dict.items() if 'linear2' not in k}
A__ = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
A__ = {k: v for k, v in state_dict.items() if 'weight' in k}
A__ = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase_,lowercase_ )
# Duplicates are removed
A__ = OffloadedWeightsLoader(state_dict=lowercase_,save_folder=lowercase_ )
# Every key is there with the right value
self.assertEqual(sorted(lowercase_ ),sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase_,weight_map[key] ) )
    def test_extract_submodules_state_dict(self):
A__ = {'a.1': 0, 'a.10': 1, 'a.2': 2}
A__ = extract_submodules_state_dict(lowercase_,['a.1', 'a.2'] )
self.assertDictEqual(lowercase_,{'a.1': 0, 'a.2': 2} )
A__ = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
A__ = extract_submodules_state_dict(lowercase_,['a.1', 'a.2'] )
self.assertDictEqual(lowercase_,{'a.1.a': 0, 'a.2.a': 2} )
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
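# A minimal sketch of exercising the parser programmatically (assumes the imports
# above; useful when scripting rather than answering the interactive prompts):
#
#   parser = config_command_parser()
#   args = parser.parse_args(["--config_file", "my_config.yaml"])
#   # config_command(args) would then prompt interactively and write my_config.yaml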
'''simple docstring'''
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
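# Complexity note: both helpers are standard breadth-first searches, so each runs
# in O(V + E) time and O(V) space. On the demo graph above:
#   bfs_shortest_path(demo_graph, "G", "D")           # -> ['G', 'C', 'A', 'B', 'D']
#   bfs_shortest_path_distance(demo_graph, "G", "D")  # -> 4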
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = 256
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
def __call__( self : Tuple , __a : List[List[int]] , __a : Optional[torch.Generator] = None , __a : int = 100 , __a : bool = True , __a : str = "numpy" , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__a )}.''' )
_UpperCamelCase : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_UpperCamelCase : int = np.zeros([1, 0, self.n_dims] , np.floataa )
_UpperCamelCase : Optional[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__a , device=self.device )
for i, encoder_input_tokens in enumerate(__a ):
if i == 0:
_UpperCamelCase : Optional[int] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_UpperCamelCase : Union[str, Any] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__a , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_UpperCamelCase : Dict = ones
_UpperCamelCase : str = self.scale_features(
__a , output_range=[-1.0, 1.0] , clip=__a )
_UpperCamelCase : Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__a , continuous_mask=__a , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_UpperCamelCase : int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__a , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__a )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCamelCase : Optional[Any] = self.decode(
encodings_and_masks=__a , input_tokens=__a , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_UpperCamelCase : List[str] = self.scheduler.step(__a , __a , __a , generator=__a ).prev_sample
_UpperCamelCase : Tuple = self.scale_to_features(__a , input_range=[-1.0, 1.0] )
_UpperCamelCase : Optional[Any] = mel[:1]
_UpperCamelCase : int = mel.cpu().float().numpy()
_UpperCamelCase : int = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a )
logger.info("Generated segment" , __a )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
_UpperCamelCase : Dict = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_UpperCamelCase : List[Any] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__a )
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
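# Hand-checked values for check_partition_perfect, assuming the definitions above:
#   check_partition_perfect(2)  -> True   (sqrt(9) / 2 + 1 / 2 == 2 == 2**1)
#   check_partition_perfect(12) -> True   (sqrt(49) / 2 + 1 / 2 == 4 == 2**2)
#   check_partition_perfect(3)  -> False  (log2(2.3027...) is not an integer)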
| 89
| 0
|
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
snake_case__ : List[str] = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _snake_case ( _snake_case : List[Any] ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _snake_case ( _snake_case : Union[str, Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def _snake_case ( _snake_case : Optional[int] ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase : Optional[int] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
def _snake_case ( _snake_case : str , _snake_case : Any ):
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase : Tuple = 0
# Doctest custom flag to ignore output.
snake_case__ : List[Any] = doctest.register_optionflag('''IGNORE_RESULT''')
snake_case__ : Dict = doctest.OutputChecker
class snake_case_( a__ ):
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
snake_case__ : Any = CustomOutputChecker
snake_case__ : Any = HfDoctestModule
snake_case__ : List[Any] = HfDocTestParser
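# Sketch of how the IGNORE_RESULT flag registered above can be used in a doctest
# (hypothetical example, not part of this conftest):
#
#     >>> import random; random.random()  # doctest: +IGNORE_RESULT
#     0.0
#
# With the custom output checker installed above, the recorded output is not compared at all.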
| 353
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( _snake_case : List[str] ):
lowerCAmelCase : Union[str, Any] = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCAmelCase : Union[str, Any] = 6
lowerCAmelCase : Any = 128
lowerCAmelCase : List[Any] = (2, 2, 18, 2)
lowerCAmelCase : Any = (4, 8, 16, 32)
elif "large" in model_name:
lowerCAmelCase : Tuple = 12
lowerCAmelCase : Dict = 192
lowerCAmelCase : List[str] = (2, 2, 18, 2)
lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
lowerCAmelCase : Optional[int] = window_size
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : int = num_heads
return config
def _snake_case ( _snake_case : Union[str, Any] ):
if "encoder.mask_token" in name:
lowerCAmelCase : Dict = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
lowerCAmelCase : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
lowerCAmelCase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
lowerCAmelCase : Tuple = '''layernorm.weight'''
if name == "encoder.norm.bias":
lowerCAmelCase : str = '''layernorm.bias'''
if "decoder" in name:
pass
else:
lowerCAmelCase : Optional[Any] = '''swin.''' + name
return name
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Optional[int] ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_snake_case )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCAmelCase : List[Any] = key.split('''.''' )
lowerCAmelCase : Dict = int(key_split[2] )
lowerCAmelCase : Optional[Any] = int(key_split[4] )
lowerCAmelCase : List[str] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase : Dict = val[:dim, :]
lowerCAmelCase : Dict = val[
dim : dim * 2, :
]
lowerCAmelCase : int = val[-dim:, :]
else:
lowerCAmelCase : str = val[
:dim
]
lowerCAmelCase : List[str] = val[
dim : dim * 2
]
lowerCAmelCase : Optional[Any] = val[
-dim:
]
else:
lowerCAmelCase : str = val
return orig_state_dict
def _snake_case ( _snake_case : List[str] , _snake_case : int , _snake_case : Dict , _snake_case : str ):
lowerCAmelCase : List[str] = torch.load(_snake_case , map_location='''cpu''' )['''model''']
lowerCAmelCase : List[Any] = get_swin_config(_snake_case )
lowerCAmelCase : List[Any] = SwinForMaskedImageModeling(_snake_case )
model.eval()
lowerCAmelCase : int = convert_state_dict(_snake_case , _snake_case )
model.load_state_dict(_snake_case )
lowerCAmelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase : Union[str, Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
lowerCAmelCase : str = image_processor(images=_snake_case , return_tensors='''pt''' )
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**_snake_case ).logits
print(outputs.shape )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Dict = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
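# Minimal sketch of the fused-qkv split performed in convert_state_dict above,
# with a made-up head size (illustration only):
#
#     dim = 4
#     qkv_weight = torch.randn(3 * dim, dim)   # rows are stacked as [q; k; v]
#     q = qkv_weight[:dim, :]
#     k = qkv_weight[dim : dim * 2, :]
#     v = qkv_weight[-dim:, :]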
| 314
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( UpperCamelCase_ , unittest.TestCase ):
__a : Any = DebertaTokenizer
__a : Optional[int] = True
__a : Tuple = DebertaTokenizerFast
def A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase = {'''unk_token''': '''[UNK]'''}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def A ( self : str , **lowercase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def A ( self : str , lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = '''lower newer'''
return input_text, output_text
def A ( self : Any ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
UpperCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = tokenizer('''Hello''' , '''World''' )
UpperCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __SCREAMING_SNAKE_CASE )
@slow
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
UpperCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
UpperCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['''input_ids''']]
# fmt: off
UpperCAmelCase = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
UpperCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
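# How the toy vocab/merges defined in setUp tokenize "lower newer" (traced by hand):
#   "lower"  -> l o w e r        -> merge "e r" -> l o w er
#   " newer" -> \u0120 n e w e r -> merge "e r" -> \u0120 n e w er
# which matches the expected token sequence in the test above.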
| 34
|
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main() -> None:
    message = input('''Enter message: ''' )
    key = input('''Enter key [alphanumeric]: ''' )
    mode = input('''Encrypt/Decrypt [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        mode = '''encrypt'''
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('''d''' ):
        mode = '''decrypt'''
        translated = decrypt_message(key , message )
    print(f"\n{mode.title()}ed message:" )
    print(translated )
def encrypt_message(key , message ) -> str:
    return translate_message(key , message , '''encrypt''' )
def decrypt_message(key , message ) -> str:
    return translate_message(key , message , '''decrypt''' )
def translate_message(key , message , mode ) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
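# Round-trip sanity check (runs on import), assuming the functions above:
assert decrypt_message('''LEMON''' , encrypt_message('''LEMON''' , '''Attack at dawn!''' ) ) == "Attack at dawn!"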
| 338
| 0
|
def is_sum_subset(arr , required_sum ):
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1 , required_sum + 1 ):
        subset[0][j] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
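# Hand-verified examples (run on import), assuming is_sum_subset above:
assert is_sum_subset([3, 34, 4, 12, 5, 2] , 9 )        # 4 + 5 == 9
assert not is_sum_subset([3, 34, 4, 12, 5, 2] , 30 )   # no subset sums to 30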
| 356
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
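# Sequence-length arithmetic used by the model tester above:
#   num_patches = (image_size // patch_size) ** 2, plus 1 for the [CLS] token,
#   e.g. image_size=30, patch_size=2 -> (30 // 2) ** 2 + 1 = 226.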
| 343
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowerCamelCase : str = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_lowerCamelCase : Dict = {'''facebook/blenderbot-3B''': 128}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = ["input_ids", "attention_mask"]
_UpperCAmelCase : Dict = BlenderbotTokenizer
def __init__( self : List[Any] , lowercase : int=None , lowercase : Any=None , lowercase : Tuple=None , lowercase : Dict="replace" , lowercase : Optional[int]="<s>" , lowercase : Optional[Any]="</s>" , lowercase : Any="</s>" , lowercase : List[Any]="<s>" , lowercase : List[Any]="<unk>" , lowercase : Any="<pad>" , lowercase : List[str]="<mask>" , lowercase : List[str]=False , lowercase : Optional[int]=True , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = getattr(lowercase , pre_tok_state.pop('type' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**lowercase )
_snake_case = add_prefix_space
_snake_case = 'post_processor'
_snake_case = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
_snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_snake_case = tuple(state['sep'] )
if "cls" in state:
_snake_case = tuple(state['cls'] )
_snake_case = False
if state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_snake_case = add_prefix_space
_snake_case = True
if state.get('trim_offsets' , lowercase ) != trim_offsets:
_snake_case = trim_offsets
_snake_case = True
if changes_to_apply:
_snake_case = getattr(lowercase , state.pop('type' ) )
_snake_case = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def A ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
_snake_case = value
def A ( self : int , *lowercase : Any , **lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def A ( self : Dict , *lowercase : List[Any] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def A ( self : List[Any] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
_snake_case = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def A ( self : Optional[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A ( self : List[Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def A ( self : Tuple , lowercase : "Conversation" ):
'''simple docstring'''
_snake_case = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(lowercase )
_snake_case = ' '.join(lowercase )
_snake_case = self.encode(lowercase )
if len(lowercase ) > self.model_max_length:
_snake_case = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
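# Sketch of build_inputs_with_special_tokens above: Blenderbot only appends the EOS
# token, so (with a hypothetical eos_token_id of 2):
#
#     build_inputs_with_special_tokens([5, 6, 7])  ->  [5, 6, 7, 2]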
| 282
|
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase : int = 16 , lowercase : int = 88 , lowercase : Optional[int] = None , lowercase : int = 1 , lowercase : float = 0.0 , lowercase : int = 32 , lowercase : Optional[int] = None , lowercase : bool = False , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "geglu" , lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowercase , attention_head_dim=lowercase , in_channels=lowercase , num_layers=lowercase , dropout=lowercase , norm_num_groups=lowercase , cross_attention_dim=lowercase , attention_bias=lowercase , sample_size=lowercase , num_vector_embeds=lowercase , activation_fn=lowercase , num_embeds_ada_norm=lowercase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case = [1, 0]
def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[str]=None , lowercase : Tuple=None , lowercase : Dict=None , lowercase : bool = True , ):
'''simple docstring'''
_snake_case = hidden_states
_snake_case = []
_snake_case = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case = self.transformer_index_for_condition[i]
_snake_case = self.transformers[transformer_index](
lowercase , encoder_hidden_states=lowercase , timestep=lowercase , cross_attention_kwargs=lowercase , return_dict=lowercase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowercase )
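# The blending performed in the forward pass above, in isolation (illustration only):
#
#     mixed  = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
#     output = mixed + input_states   # residual connection back to the input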
| 282
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "token_type_ids"]
SCREAMING_SNAKE_CASE_ = FNetTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__="<unk>", lowerCAmelCase__="[SEP]", lowerCAmelCase__="<pad>", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", **lowerCAmelCase__, ) -> int:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
snake_case_ = (
AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__, normalized=lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__)
else mask_token
)
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, remove_space=lowerCAmelCase__, keep_accents=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
def _A ( _lowercase ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_lowercase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_lowercase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_lowercase ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class __lowerCamelCase (_a ):
_lowercase = ["""pixel_values"""]
def __init__( self: Optional[Any],A_: bool = True,A_: Dict[str, int] = None,A_: PILImageResampling = PILImageResampling.BILINEAR,A_: bool = True,A_: Dict[str, int] = None,A_: bool = True,A_: Union[int, float] = 1 / 255,A_: bool = True,A_: bool = True,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,**A_: Union[str, Any],):
'''simple docstring'''
super().__init__(**A_ )
__UpperCamelCase = size if size is not None else {'shortest_edge': 256}
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
__UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase = get_size_dict(A_,param_name='crop_size' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = resample
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = offset
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self: Dict,A_: np.ndarray,A_: Dict[str, int],A_: PILImageResampling = PILImageResampling.BILINEAR,A_: Optional[Union[str, ChannelDimension]] = None,**A_: int,):
'''simple docstring'''
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(A_,size['shortest_edge'],default_to_square=A_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['height'], size['width'])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(A_,size=A_,resample=A_,data_format=A_,**A_ )
def snake_case_ ( self: Tuple,A_: np.ndarray,A_: Dict[str, int],A_: Optional[Union[str, ChannelDimension]] = None,**A_: Tuple,):
'''simple docstring'''
__UpperCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(A_,size=(size['height'], size['width']),data_format=A_,**A_ )
def snake_case_ ( self: List[str],A_: np.ndarray,A_: Union[int, float],A_: bool = True,A_: Optional[Union[str, ChannelDimension]] = None,**A_: Optional[Any],):
'''simple docstring'''
__UpperCamelCase = image.astype(np.floataa )
if offset:
__UpperCamelCase = image - (scale / 2)
return rescale(A_,scale=A_,data_format=A_,**A_ )
def snake_case_ ( self: List[str],A_: np.ndarray,A_: Union[float, List[float]],A_: Union[float, List[float]],A_: Optional[Union[str, ChannelDimension]] = None,**A_: List[str],):
'''simple docstring'''
return normalize(A_,mean=A_,std=A_,data_format=A_,**A_ )
def snake_case_ ( self: Union[str, Any],A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: PILImageResampling = None,A_: bool = None,A_: Dict[str, int] = None,A_: bool = None,A_: float = None,A_: bool = None,A_: bool = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[ChannelDimension] = ChannelDimension.FIRST,):
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
__UpperCamelCase = to_numpy_array(A_ )
if do_resize:
__UpperCamelCase = self.resize(image=A_,size=A_,resample=A_ )
if do_center_crop:
__UpperCamelCase = self.center_crop(A_,size=A_ )
if do_rescale:
__UpperCamelCase = self.rescale(image=A_,scale=A_,offset=A_ )
if do_normalize:
__UpperCamelCase = self.normalize(image=A_,mean=A_,std=A_ )
__UpperCamelCase = to_channel_dimension_format(A_,A_ )
return image
def snake_case_ ( self: List[str],A_: ImageInput,A_: bool = None,A_: Dict[str, int] = None,A_: PILImageResampling = None,A_: bool = None,A_: Dict[str, int] = None,A_: bool = None,A_: float = None,A_: bool = None,A_: bool = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[float, List[float]]] = None,A_: Optional[Union[str, TensorType]] = None,A_: ChannelDimension = ChannelDimension.FIRST,**A_: List[Any],):
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = offset if offset is not None else self.offset
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(A_,default_to_square=A_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(A_,param_name='crop_size' )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__UpperCamelCase = make_batched(A_ )
__UpperCamelCase = [
[
self._preprocess_image(
image=A_,do_resize=A_,size=A_,resample=A_,do_center_crop=A_,crop_size=A_,do_rescale=A_,rescale_factor=A_,offset=A_,do_normalize=A_,image_mean=A_,image_std=A_,data_format=A_,)
for img in video
]
for video in videos
]
__UpperCamelCase = {'pixel_values': videos}
return BatchFeature(data=A_,tensor_type=A_ )
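# Shapes accepted by make_batched above (illustration only):
#   a single image             -> [[image]]
#   a list of images (a video) -> [list_of_images]
#   a list of videos           -> passed through unchanged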
| 310
|
import torch
from transformers import AutoModel
class __lowerCamelCase (torch.nn.Module ):
def __init__( self: Union[str, Any],A_: Tuple="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(A_,self ).__init__()
__UpperCamelCase = AutoModel.from_pretrained(A_,return_dict=A_ )
__UpperCamelCase = torch.nn.CosineSimilarity(3,1E-08 )
__UpperCamelCase = torch.nn.Softmax(dim=1 )
def snake_case_ ( self: Tuple,**A_: Union[str, Any] ):
'''simple docstring'''
return self.bert(**A_ ).last_hidden_state
def snake_case_ ( self: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2,keepdim=A_ )
def snake_case_ ( self: List[str],A_: Dict,A_: Union[str, Any],A_: Union[str, Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(A_,A_ ) )
def snake_case_ ( self: Optional[int],A_: Union[str, Any],A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = W_supports['sizes'].tolist()
__UpperCamelCase = W_supports['start_token_id'].item()
__UpperCamelCase = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = self.BERT(**A_ )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = W_supports['input_ids'] == start_token_id
__UpperCamelCase = W_supports['input_ids'] == end_token_id
for i, size in enumerate(A_ ):
if i == 0:
__UpperCamelCase = 0
else:
__UpperCamelCase = support_sizes[i - 1]
__UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
__UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]
__UpperCamelCase = torch.matmul(q[i],s_start.T ).sum(1 ).softmax(0 )
__UpperCamelCase = torch.matmul(q[i],s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__UpperCamelCase = torch.vstack((p_starts, p_start) )
__UpperCamelCase = torch.vstack((p_ends, p_end) )
else:
__UpperCamelCase = p_start
__UpperCamelCase = p_end
return p_starts, p_ends
| 310
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 230
|
"""simple docstring"""
import base64
def base85_encode(string: str ) -> bytes:
    # a85encode operates on bytes, so encode the input string first
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode(a85encoded: bytes ) -> str:
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
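# Round-trip sanity check (runs on import), assuming the helpers above:
assert base85_decode(base85_encode("Hello World!" ) ) == "Hello World!"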
| 230
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
return max(metric_fn(_A , _A ) for gt in ground_truths )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A )
answers.append(_A )
else:
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [[reference] for reference in references]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for prediction, ground_truths in zip(_A , _A ):
total += 1
em += metric_max_over_ground_truths(_A , _A , _A )
fa += metric_max_over_ground_truths(_A , _A , _A )
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = args.k
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()]
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0
for hypo, reference in zip(_A , _A ):
SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] )
SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
def strip_title(_A ):
if title.startswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[1:]
if title.endswith('''"''' ):
SCREAMING_SNAKE_CASE__ = title[:-1]
return title
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A )
SCREAMING_SNAKE_CASE__ = question_enc_outputs[0]
SCREAMING_SNAKE_CASE__ = rag_model.retriever(
_A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE__ = []
for docs in all_docs:
SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_A ) )
return provenance_strings
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_A , return_tensors='''pt''' , padding=_A , truncation=_A )
SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE__ = rag_model.generate(  # rag_model overrides generate
_A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A )
if args.print_predictions:
for q, a in zip(_A , _A ):
logger.info('''Q: {} - A: {}'''.format(_A , _A ) )
return answers
def UpperCAmelCase_ ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file: '''
'''qa - a single line in the following format: question [tab] answer_list; '''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
            model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
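# Example invocation (a sketch only; the script filename, checkpoint, and data paths
# below are placeholders, not values taken from this file):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/predictions.txt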
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    # Curved surface (2 * pi * r^2) plus the flat circular base (pi * r^2).
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)), with s the semi-perimeter.
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
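# To actually run this suite (a sketch; requires AWS credentials and the SageMaker SDK,
# and the file path below is an assumption based on the imports):
#
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker/test_multi_node_model_parallel.py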
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
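# Note on the `noise` keyword used throughout this file: ViTMAE normally samples its
# patch mask internally on every forward pass, so the tests pass an explicit `noise`
# array to make runs reproducible. A minimal sketch of the same trick outside the test
# harness (names as used above):
#
#   model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
#   noise = np.random.uniform(size=(1, 196))  # 196 = (224 // 16) ** 2 patches
#   outputs = model(**inputs, noise=noise)    # `inputs` from a ViTImageProcessor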
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
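# Example launch (a sketch; the script filename is a placeholder, and flags other than
# those defined above come from the `accelerate` CLI itself):
#
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 2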
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
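# A minimal usage sketch (not part of the module): the `attribute_map` above lets the
# generic `hidden_size` / `num_attention_heads` names resolve to the DETR-style fields.
#
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8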
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
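# A minimal sketch (not part of the module) of what `generate_dummy_inputs` produces with
# `use_past=True`: each of the `n_layer` (key, value) pairs is a zero tensor of shape
# (batch, n_head, seq_len + 2, n_embd // n_head), and the attention mask is widened to
# cover the past length. `tokenizer` below is a placeholder for any CodeGen tokenizer.
#
#   cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
#   onnx_cfg = CodeGenOnnxConfig(cfg, use_past=True)
#   dummy = onnx_cfg.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=3, framework=TensorType.PYTORCH)
#   # dummy["past_key_values"][0][0].shape == (1, 4, 5, 16)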
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Return True if `string` can be segmented into a sequence of one or more `words`.
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
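    # Minimal usage sketch: "applepenapple" segments as apple + pen + apple,
    # while "catsandog" has no valid segmentation.
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False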
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
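# A minimal sketch (not executable without real vocab files) of the sequence-pair layout
# produced by `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences`:
#
#   pair:            [CLS] A A A [SEP] B B [SEP]
#   token_type_ids:    0   0 0 0   0   1 1   1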
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , question: str ):
        return self.pre_processor(image , question , return_tensors="pt" )
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
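# Minimal usage sketch (assumes the `torch` and `vision` extras are installed and the
# default checkpoint can be downloaded; the image path is a placeholder):
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("cats.png"), "How many cats are in the picture?")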
| 161
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """simple docstring"""
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules():
    """simple docstring"""
    # This function has already been executed if HF_MODULES_CACHE is already on the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file ):
    """simple docstring"""
    with open(module_file , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files(module_file ):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports(filename ):
    """simple docstring"""
    with open(filename , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            f"""{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`""" )
    return get_relative_imports(filename )
def get_class_in_module(class_name , module_path ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '''.''' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class(loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    f""" {loaded_module}.""" )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path , module_file , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , ):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f"""Defaulting to latest_version: {revision}.""" )
        elif revision in available_versions:
            revision = f"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                f""" {', '.join(available_versions + ['main'] )}.""" )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=False , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = f"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , f"""{module_needed}.py""" , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module(pretrained_model_name_or_path , module_file , class_name=None , cache_dir=None , force_download=False , resume_download=False , proxies=None , use_auth_token=None , revision=None , local_files_only=False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
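# Minimal usage sketch for the loader above. The repo id and module name are illustrative
# placeholders; any Hub repo that ships the named `.py` file (or a community pipeline name
# on GitHub) works the same way:
#   pipeline_cls = get_class_from_dynamic_module("some-user/some-repo", "pipeline.py")
#   pipe = pipeline_cls(...)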
| 230
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    """simple docstring"""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    """simple docstring"""
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'''conll_score''': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines ):
    """simple docstring"""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
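# Column sketch for the check above: in CoNLL lines, the sixth whitespace-separated
# column (index 5) holds the parse bit; any non-comment line whose parse bit is not "-"
# means gold parse annotation is present, which `min_span` requires.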
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval( datasets.Metric ):
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' ) ),
                } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 230
| 1
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = "[PAD]"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1_012 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
snake_case_ = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 352
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
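# Sketch of the effect: after the lazy-module swap above, an import such as
# `from transformers.models.canine import CanineModel` (assuming this file is that
# package's `__init__.py`) only pulls in the torch-backed module on first attribute access.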
| 92
| 0
|
import numpy as np
class IndexCalculation:
    """simple docstring"""
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        """simple docstring"""
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
            'ARVI2': self.arv12,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arv12( self ):
        """simple docstring"""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        """simple docstring"""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        """simple docstring"""
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        """simple docstring"""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        """simple docstring"""
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        """simple docstring"""
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        """simple docstring"""
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        """simple docstring"""
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        """simple docstring"""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        """simple docstring"""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        """simple docstring"""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        """simple docstring"""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , x=0.08 , a=1.22 , b=0.03 ):
        """simple docstring"""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        """simple docstring"""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        """simple docstring"""
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        """simple docstring"""
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        """simple docstring"""
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        """simple docstring"""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        """simple docstring"""
        return self.nir - self.green
    def evi( self ):
        """simple docstring"""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        """simple docstring"""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
        """simple docstring"""
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        """simple docstring"""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        """simple docstring"""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        """simple docstring"""
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        """simple docstring"""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        """simple docstring"""
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        """simple docstring"""
        return self.nir / self.red
    def mrvi( self ):
        """simple docstring"""
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        """simple docstring"""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        """simple docstring"""
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        """simple docstring"""
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        """simple docstring"""
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        """simple docstring"""
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        """simple docstring"""
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        """simple docstring"""
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        """simple docstring"""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        """simple docstring"""
        return self.nir / self.red
    def tvi( self ):
        """simple docstring"""
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        """simple docstring"""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
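# Minimal usage sketch for the class above (the band values below are synthetic and
# only illustrative):
if __name__ == "__main__":
    red_band = np.array([[0.20, 0.30], [0.25, 0.28]] )
    nir_band = np.array([[0.60, 0.70], [0.65, 0.62]] )
    indices = IndexCalculation(red=red_band , nir=nir_band )
    # elementwise (nir - red) / (nir + red)
    print(indices.calculation('NDVI' , red=red_band , nir=nir_band ) )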
| 71
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer ):
    def __init__( self , replacement = "▁" , add_prefix_space = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ):
        '''simple docstring'''
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}') , ' '),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer , parameters)
    def train( self , files , vocab_size = 8000 , show_progress = True , ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str):
            files = [files]
        self._tokenizer.train(files , trainer=trainer)
        self.add_unk_id()
    def train_from_iterator( self , iterator , vocab_size = 8000 , show_progress = True , ):
        '''simple docstring'''
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer)
        self.add_unk_id()
    def add_unk_id( self ):
        '''simple docstring'''
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
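# Minimal training sketch (the two-sentence corpus and the vocab size are placeholders,
# not a realistic training setup):
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(["hello world", "a tiny unigram training corpus"] , vocab_size=30 )
    print(tokenizer.encode("hello world" ).tokens )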
| 190
| 0
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_doc_toc(doc_list ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
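# Typical invocation (the script path is an assumption; adjust to wherever this file
# lives in the repository):
#   python utils/check_doc_toc.py                      # check only
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the YAML in place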
| 288
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well, which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF( datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute( self , predictions , references , char_order = CHRF.CHAR_ORDER , word_order = CHRF.WORD_ORDER , beta = CHRF.BETA , lowercase = False , whitespace = False , eps_smoothing = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 288
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx )-1}' )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx )-1}' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F'block{idx}' , F'block.{int(idx )-1}' )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx )-1}' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
: config.hidden_sizes[i], :
]
state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
config.hidden_sizes[i] :, :
]
state_dict[F'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase = rename_keys(SCREAMING_SNAKE_CASE )
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
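A typical command-line invocation of the conversion above (the script filename is an assumption; substitute whatever this file is saved as):
# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti \
#     --model_name glpn-kitti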
| 46
|
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE = 'src/transformers'
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE = re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE = re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*try:')
# Catches a line with else:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*else:')
def snake_case ( snake_case__ :Optional[Any]) -> List[str]:
if _re_test_backend.search(snake_case__) is None:
return None
_A = [b[0] for b in _re_backend.findall(snake_case__)]
backends.sort()
return "_and_".join(snake_case__)
def snake_case ( snake_case__ :Any) -> Any:
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
_A = f.readlines()
_A = 0
while line_index < len(snake_case__) and not lines[line_index].startswith("""_import_structure = {"""):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case__):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith("""if TYPE_CHECKING""") and find_backend(lines[line_index]) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case__):
_A = _re_one_line_import_struct.search(snake_case__).groups()[0]
_A = re.findall("""\[([^\]]+)\]""" , snake_case__)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """)])
line_index += 1
continue
_A = _re_import_struct_key_value.search(snake_case__)
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """) if len(snake_case__) > 0]
objects.extend(snake_case__)
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING"""):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 4):
_A = lines[line_index]
if _re_import_struct_add_one.search(snake_case__) is not None:
objects.append(_re_import_struct_add_one.search(snake_case__).groups()[0])
elif _re_import_struct_add_many.search(snake_case__) is not None:
_A = _re_import_struct_add_many.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_between_brackets.search(snake_case__) is not None:
_A = _re_between_brackets.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_quote_object.search(snake_case__) is not None:
objects.append(_re_quote_object.search(snake_case__).groups()[0])
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
elif line.startswith(""" """ * 12 + """\""""):
objects.append(line[13:-3])
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(snake_case__)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("""else""")
):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case__):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 8):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 12):
objects.append(line[12:-2])
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case ( snake_case__ :Dict , snake_case__ :int) -> List[Any]:
def find_duplicates(snake_case__ :Union[str, Any]):
return [k for k, v in collections.Counter(snake_case__).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
_A = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
_A = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
return errors
def snake_case ( ) -> int:
_A = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
if "__init__.py" in files:
_A = os.path.join(root , """__init__.py""")
_A = parse_init(fname)
if objects is not None:
_A = analyze_results(*objects)
if len(errors) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(errors))
if len(failures) > 0:
raise ValueError("""\n\n""".join(failures))
def snake_case ( ) -> Optional[Any]:
_A = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
for folder in directories:
# Ignore private modules
if folder.startswith("""_"""):
directories.remove(folder)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path) / folder).glob("""*.py"""))) == 0:
continue
_A = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
_A = short_path.replace(os.path.sep , """.""")
submodules.append(submodule)
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
_A = short_path.replace(""".py""" , """""").replace(os.path.sep , """.""")
if len(submodule.split(""".""")) == 1:
submodules.append(submodule)
return submodules
_SCREAMING_SNAKE_CASE = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case ( ) -> Union[str, Any]:
# This is to make sure the transformers module imported is the one in the repo.
_A = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(snake_case__ , """__init__.py""") , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_A = spec.loader.load_module()
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(module_not_registered) > 0:
_A = """\n""".join(F'''- {module}''' for module in module_not_registered)
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""")
if __name__ == "__main__":
check_all_inits()
check_submodules()
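Since PATH_TO_TRANSFORMERS is the relative path 'src/transformers', this checker is meant to run from the repository root; the script path below is an assumption about where the file lives:
# Run from the repo root:
# python utils/check_inits.py
# Passes silently when every __init__.py's _import_structure matches its
# TYPE_CHECKING branch; raises ValueError with the list of mismatches otherwise.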
| 180
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ):
'''simple docstring'''
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ):
'''simple docstring'''
# set torch weights for 1-to-1 comparison
snake_case_ : Optional[Any] = np.asarray(weights[0] )
snake_case_ : int = np.asarray(weights[1] )
snake_case_ : Any = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ):
'''simple docstring'''
# set torch weights for 1-to-1 comparison
snake_case_ : List[Any] = np.asarray(weights[0] )
snake_case_ : Optional[int] = np.asarray(weights[1] )
snake_case_ : Union[str, Any] = np.asarray(weights[2] )
snake_case_ : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
# layernorm 1
snake_case_ : str = weights[0][0][0]
snake_case_ : int = np.asarray(layer_norm_a[0] )
snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , )
# lsh weights + output
snake_case_ : Tuple = weights[0][1]
if len(attn_weights ) < 4:
set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
else:
set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
# intermediate weights
snake_case_ : str = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase_ ) == 4:
snake_case_ : List[Any] = intermediate_weights[2]
# layernorm 2
snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] )
snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , )
# intermediate dense
snake_case_ : Any = np.asarray(intermediate_weights[1][0] )
snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , )
# intermediate out
snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] )
snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , )
def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ):
'''simple docstring'''
# reformer model
snake_case_ : Dict = torch_model.reformer
# word embeds
snake_case_ : List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , )
if isinstance(weights[3] , lowerCamelCase_ ):
snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) )
snake_case_ : List[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# output layer norm
snake_case_ : Optional[Any] = np.asarray(weights[7][0] )
snake_case_ : List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , )
# output embeddings
snake_case_ : Optional[int] = np.asarray(weights[9][0] )
snake_case_ : Any = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , )
def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ):
'''simple docstring'''
# Initialise PyTorch model
snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ )
with open(lowerCamelCase_ , """rb""" ) as f:
snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""]
set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the Trax model pickle (.pkl) file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__A : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
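A typical invocation of the Trax-to-PyTorch conversion above (the script filename is an assumption):
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path ./reformer_weights.pkl \
#     --config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin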
| 8
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ):
'''simple docstring'''
snake_case_ : Tuple = x_start
snake_case_ : Optional[int] = fnc(lowerCamelCase_ )
snake_case_ : Optional[int] = 0.0
for _ in range(lowerCamelCase_ ):
# Approximates small segments of the curve as linear and solves
# for the trapezoidal area
snake_case_ : int = (x_end - x_start) / steps + xa
snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
snake_case_ : Any = xa
snake_case_ : str = fxa
return area
if __name__ == "__main__":
def UpperCAmelCase ( lowerCamelCase_ :Any ):
'''simple docstring'''
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
__A : List[str] = 10
while i <= 100_000:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 10
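As a sanity check on the approximation: the exact integral of x^3 + x^2 over [-5, 5] is 250/3 ≈ 83.33, since the odd x^3 term integrates to zero, so the printed values should converge toward that as the step count grows. A small verification sketch using the names the __main__ block already uses:
# Sanity-check sketch: compare against the closed form 250/3.
exact = 250 / 3  # integral of x^2 over [-5, 5]; the x^3 part cancels by symmetry
approx = trapezoidal_area(f, -5, 5, 100_000)
assert abs(approx - exact) < 1e-2, (approx, exact)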
| 8
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__ ( PipelineTool):
UpperCAmelCase__ : Union[str, Any] = 'dandelin/vilt-b32-finetuned-vqa'
UpperCAmelCase__ : Any = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
UpperCAmelCase__ : List[Any] = 'image_qa'
UpperCAmelCase__ : List[str] = AutoProcessor
UpperCAmelCase__ : List[str] = AutoModelForVisualQuestionAnswering
UpperCAmelCase__ : Optional[int] = ['image', 'text']
UpperCAmelCase__ : Tuple = ['text']
def __init__( self :Any , *_A :int , **_A :List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*_A , **_A )
def lowercase_ ( self :Tuple , _A :"Image" , _A :str ) -> int:
'''simple docstring'''
return self.pre_processor(_A , _A , return_tensors='pt' )
def lowercase_ ( self :Optional[Any] , _A :List[Any] ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
return self.model(**_A ).logits
def lowercase_ ( self :Tuple , _A :str ) -> List[Any]:
'''simple docstring'''
__A = outputs.argmax(-1 ).item()
return self.model.config.id2label[idx]
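A usage sketch for the tool above; its public class name is obscured in this dump, so the name below is a placeholder assumption:
# Hypothetical usage (class name assumed; PipelineTool instances are callable
# and chain encode -> forward -> decode):
# from PIL import Image
# tool = ImageQuestionAnsweringTool()
# print(tool(image=Image.open('cats.png'), question='How many cats are there?'))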
| 161
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase):
UpperCAmelCase__ : str = KandinskyImgaImgPipeline
UpperCAmelCase__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
UpperCAmelCase__ : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
UpperCAmelCase__ : Union[str, Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCAmelCase__ : Any = False
@property
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[int] ) -> str:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[Any] ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self :Optional[Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def lowercase_ ( self :Tuple ) -> Tuple:
'''simple docstring'''
__A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__A = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__A = MultilingualCLIP(_A )
__A = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__A = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__A = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__A = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
__A = self.dummy_text_encoder
__A = self.dummy_tokenizer
__A = self.dummy_unet
__A = self.dummy_movq
__A = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__A = DDIMScheduler(**_A )
__A = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self :Dict , _A :Union[str, Any] , _A :Optional[int]=0 ) -> str:
'''simple docstring'''
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) )
if str(_A ).startswith('mps' ):
__A = torch.manual_seed(_A )
else:
__A = torch.Generator(device=_A ).manual_seed(_A )
__A = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**_A )
__A = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = pipe(**self.get_dummy_inputs(_A ) )
__A = output.images
__A = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__A = 'A red cartoon frog, 4k'
__A = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__A = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
__A = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A , __A = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__A = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
| 161
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Optional[int] =parent
UpperCAmelCase : Union[str, Any] =batch_size
UpperCAmelCase : Union[str, Any] =seq_length
UpperCAmelCase : Optional[Any] =is_training
UpperCAmelCase : Tuple =use_input_mask
UpperCAmelCase : List[str] =use_token_type_ids
UpperCAmelCase : List[str] =use_labels
UpperCAmelCase : int =vocab_size
UpperCAmelCase : Optional[int] =hidden_size
UpperCAmelCase : Optional[int] =num_hidden_layers
UpperCAmelCase : Optional[Any] =num_attention_heads
UpperCAmelCase : Tuple =intermediate_size
UpperCAmelCase : List[str] =hidden_act
UpperCAmelCase : Optional[int] =hidden_dropout_prob
UpperCAmelCase : Union[str, Any] =attention_probs_dropout_prob
UpperCAmelCase : str =max_position_embeddings
UpperCAmelCase : Optional[Any] =type_vocab_size
UpperCAmelCase : Optional[int] =type_sequence_label_size
UpperCAmelCase : List[str] =initializer_range
UpperCAmelCase : str =num_labels
UpperCAmelCase : Any =num_choices
UpperCAmelCase : int =scope
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[str] =None
if self.use_input_mask:
UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any =None
if self.use_token_type_ids:
UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Dict =None
UpperCAmelCase : Any =None
UpperCAmelCase : Optional[Any] =None
if self.use_labels:
UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Any =ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Union[str, Any] =model(snake_case__ , attention_mask=snake_case__ )
UpperCAmelCase : Any =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =BioGptForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : int =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> str:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# create attention mask
UpperCAmelCase : Any =torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
UpperCAmelCase : Optional[int] =self.seq_length // 2
UpperCAmelCase : Union[str, Any] =0
# first forward pass
UpperCAmelCase , UpperCAmelCase : List[Any] =model(snake_case__ , attention_mask=snake_case__ ).to_tuple()
# create hypothetical next token and extend it to next_input_ids
UpperCAmelCase : Any =ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase : Any =ids_tensor((1,) , snake_case__ ).item() + 1
UpperCAmelCase : str =ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCAmelCase : Tuple =random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase : Optional[Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Union[str, Any] =torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case__ )] , dim=1 , )
# get two different outputs
UpperCAmelCase : Dict =model(snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
UpperCAmelCase : Optional[int] =model(snake_case__ , past_key_values=snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase : str =ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : int =output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase : Optional[int] =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] =BioGptModel(config=snake_case__ ).to(snake_case__ ).eval()
UpperCAmelCase : Union[str, Any] =torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
# first forward pass
UpperCAmelCase : Union[str, Any] =model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
UpperCAmelCase , UpperCAmelCase : List[str] =outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : List[Any] =ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attn_mask
UpperCAmelCase : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : Any =torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ )['''last_hidden_state''']
UpperCAmelCase : Union[str, Any] =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[
'''last_hidden_state'''
]
# select random slice
UpperCAmelCase : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any =output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : List[Any] =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , snake_case__=False ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] =BioGptForCausalLM(snake_case__ )
model.to(snake_case__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase : int =model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase__ ( self , snake_case__ , *snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str =BioGptModel(snake_case__ )
UpperCAmelCase : int =model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any =self.num_labels
UpperCAmelCase : int =BioGptForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : int =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.prepare_config_and_inputs()
( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) : Optional[Any] =config_and_inputs
UpperCAmelCase : Any ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__lowerCamelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : int = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[int] = False
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =BioGptModelTester(self )
UpperCAmelCase : str =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : Optional[Any] =type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case__ , gradient_checkpointing=snake_case__ )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case__ )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case__ )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(snake_case__ )
UpperCAmelCase : Optional[int] =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase : Dict ='''left'''
# Define the PAD token as the EOS token
UpperCAmelCase : int =tokenizer.eos_token
UpperCAmelCase : Any =model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase : Optional[Any] =[
'''Hello, my dog is a little''',
'''Today, I''',
]
UpperCAmelCase : Union[str, Any] =tokenizer(snake_case__ , return_tensors='''pt''' , padding=snake_case__ )
UpperCAmelCase : Tuple =inputs['''input_ids'''].to(snake_case__ )
UpperCAmelCase : Union[str, Any] =model.generate(
input_ids=snake_case__ , attention_mask=inputs['''attention_mask'''].to(snake_case__ ) , )
UpperCAmelCase : Tuple =tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(snake_case__ )
UpperCAmelCase : Dict =model.generate(input_ids=snake_case__ )
UpperCAmelCase : Union[str, Any] =inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
UpperCAmelCase : Optional[Any] =tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(snake_case__ )
UpperCAmelCase : Any =model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
UpperCAmelCase : List[str] =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
UpperCAmelCase : Optional[Any] =tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase : Optional[Any] =tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase : Tuple =[
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] =BioGptModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Optional[Any] =3
UpperCAmelCase : Dict =input_dict['''input_ids''']
UpperCAmelCase : List[str] =input_ids.ne(1 ).to(snake_case__ )
UpperCAmelCase : Dict =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] =BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str =3
UpperCAmelCase : Tuple ='''multi_label_classification'''
UpperCAmelCase : Union[str, Any] =input_dict['''input_ids''']
UpperCAmelCase : str =input_ids.ne(1 ).to(snake_case__ )
UpperCAmelCase : List[Any] =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any =BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : List[str] =model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase : Optional[int] =torch.tensor([[2, 4805, 9, 656, 21]] )
UpperCAmelCase : str =model(snake_case__ )[0]
UpperCAmelCase : Any =4_2384
UpperCAmelCase : Union[str, Any] =torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase : int =torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
UpperCAmelCase : List[str] =BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(snake_case__ )
torch.manual_seed(0 )
UpperCAmelCase : str =tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(snake_case__ )
UpperCAmelCase : Dict =model.generate(
**snake_case__ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case__ , )
UpperCAmelCase : Dict =tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase : int =(
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(snake_case__ , snake_case__ )
| 78
|
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> float:
'''simple docstring'''
UpperCAmelCase : Any =x
UpperCAmelCase : List[str] =y
for step in range(max_step ): # noqa: B007
UpperCAmelCase : int =a * a - b * b + x
UpperCAmelCase : Union[str, Any] =2 * a * b + y
UpperCAmelCase : Optional[int] =a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def lowerCAmelCase_ ( __lowerCAmelCase )-> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def lowerCAmelCase_ ( __lowerCAmelCase = 8_00 , __lowerCAmelCase = 6_00 , __lowerCAmelCase = -0.6 , __lowerCAmelCase = 0 , __lowerCAmelCase = 3.2 , __lowerCAmelCase = 50 , __lowerCAmelCase = True , )-> Image.Image:
'''simple docstring'''
UpperCAmelCase : Dict =Image.new('''RGB''' , (image_width, image_height) )
UpperCAmelCase : str =img.load()
# loop through the image-coordinates
for image_x in range(image_width ):
for image_y in range(image_height ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase : Union[str, Any] =figure_width / image_width * image_height
UpperCAmelCase : Dict =figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase : str =figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase : int =get_distance(figure_x , figure_y , max_step )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
pixels[image_x, image_y] = get_color_coded_rgb(distance )
else:
pixels[image_x, image_y] = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__snake_case = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
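For intuition about the escape-time value get_distance returns: a point inside the set never diverges, so the loop runs all max_step iterations and the ratio comes out as exactly 1.0, which both coloring functions map to black. A couple of quick checks:
# The origin is inside the Mandelbrot set: the full 50 steps give 49 / 49 == 1.0.
assert get_distance(0, 0, 50) == 1.0
# A point far outside escapes on the first step: 0 / 49 == 0.0.
assert get_distance(2, 2, 50) == 0.0
assert get_black_and_white_rgb(get_distance(0, 0, 50)) == (0, 0, 0)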
| 78
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int:
_lowerCAmelCase , _lowerCAmelCase : Tuple = 1, 1
_lowerCAmelCase : Optional[Any] = 2
while True:
_lowerCAmelCase : Any = 0
_lowerCAmelCase : str = fa + fa
_lowerCAmelCase , _lowerCAmelCase : Any = fa, f
index += 1
for _ in str(f ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
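This is Project Euler problem 25: with the default n = 1000 the result is the index of the first Fibonacci number with 1000 digits, which is 4782. Two quick spot checks on small inputs:
# F(7) = 13 is the first Fibonacci number with 2 digits,
# F(12) = 144 the first with 3 digits.
assert solution(2) == 7
assert solution(3) == 12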
| 44
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
class a__ :
def __init__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = metric_id
class a__ :
_a : Optional[int] = [MetricMock(metric_id ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
if "tmp_path" in args:
__lowerCAmelCase = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(SCREAMING_SNAKE_CASE_ , match="https://huggingface.co/docs/evaluate" ):
func(*SCREAMING_SNAKE_CASE_ )
| 92
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357
|
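# Likely Project Euler problem 1: sum all natural numbers below `n` that are
# multiples of 3 or 5; for example, solution(10) == 23 (3 + 5 + 6 + 9).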
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n``."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 62
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 288
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"{solution() = }")
| 288
| 1
|
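# Heap's algorithm: generates every permutation of a list with in-place swaps,
# e.g. heaps([1, 2, 3]) returns all 6 orderings as tuples.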
"""simple docstring"""
def heaps(arr: list) -> list:
    """Pure python implementation of Heap's algorithm, returning all permutations of a list."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 233
|
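# GLUE/MRPC fine-tuning script exercising Accelerate's DeepSpeed integration:
# a real AdamW optimizer / linear-warmup scheduler when the DeepSpeed config does
# not supply them, otherwise DummyOptim / DummyScheduler placeholders.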
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 233
| 1
|
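# Converts a pickled trax Reformer checkpoint into a Hugging Face
# ReformerModelWithLMHead: weights are copied layer by layer (LSH or local
# attention, chunked feed-forward, layer norms, embeddings).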
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 8
|
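# Dataset reader backed by a Spark DataFrame: returns a streaming dataset
# directly, or downloads/prepares a cached Arrow dataset via the Spark builder.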
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
| 8
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11
|
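# LayoutLMv3 processor: the image processor (optionally running OCR for words and
# boxes) is combined with the tokenizer so that tokens, boxes, word labels and
# pixel values come out aligned in a single BatchEncoding.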
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 11
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 78
|
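# BEiT configuration: ViT-style encoder hyper-parameters plus decode/auxiliary
# head attributes used for semantic segmentation, and an ONNX export config.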
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 78
| 1
|
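# Fast tests for the DeepFloyd IF inpainting pipeline: tiny dummy components,
# seeded generators, and relaxed tolerances for fp16 / xformers / slicing paths.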
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 147
|
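# TF DeiT tests: a tester builds tiny configs and inputs, and the mixin-based
# suite checks the base model plus the masked-image-modeling and classification
# heads, ending with a slow integration test against a pretrained checkpoint.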
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self : int , lowercase : Optional[int] , lowercase : List[Any]=13 , lowercase : Dict=30 , lowercase : Dict=2 , lowercase : Tuple=3 , lowercase : Dict=True , lowercase : Dict=True , lowercase : Tuple=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[Any]=37 , lowercase : Union[str, Any]="gelu" , lowercase : List[str]=0.1 , lowercase : List[Any]=0.1 , lowercase : List[str]=10 , lowercase : Any=0.02 , lowercase : Union[str, Any]=3 , lowercase : Tuple=None , lowercase : List[str]=2 , ):
"""simple docstring"""
lowercase_ :Union[str, Any] = parent
lowercase_ :Optional[int] = batch_size
lowercase_ :Tuple = image_size
lowercase_ :Any = patch_size
lowercase_ :List[Any] = num_channels
lowercase_ :Optional[Any] = is_training
lowercase_ :str = use_labels
lowercase_ :Any = hidden_size
lowercase_ :Optional[int] = num_hidden_layers
lowercase_ :List[Any] = num_attention_heads
lowercase_ :str = intermediate_size
lowercase_ :Optional[int] = hidden_act
lowercase_ :List[Any] = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :Any = type_sequence_label_size
lowercase_ :Dict = initializer_range
lowercase_ :List[Any] = scope
lowercase_ :List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase_ :str = (image_size // patch_size) ** 2
lowercase_ :Any = num_patches + 2
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ :Optional[Any] = None
if self.use_labels:
lowercase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Any = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self : Tuple , lowercase : int , lowercase : str , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :Optional[int] = TFDeiTModel(config=lowercase )
lowercase_ :str = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : str , lowercase : str , lowercase : Optional[Any] , lowercase : str ):
"""simple docstring"""
lowercase_ :List[str] = TFDeiTForMaskedImageModeling(config=lowercase )
lowercase_ :Optional[int] = model(lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ :List[str] = 1
lowercase_ :List[str] = TFDeiTForMaskedImageModeling(lowercase )
lowercase_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ :List[str] = model(lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[str] , lowercase : Any , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Tuple = self.type_sequence_label_size
lowercase_ :str = TFDeiTForImageClassification(lowercase )
lowercase_ :str = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ :Optional[int] = 1
lowercase_ :Optional[int] = TFDeiTForImageClassification(lowercase )
lowercase_ :List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ :int = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Tuple = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ :Tuple = config_and_inputs
lowercase_ :Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__A = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__A = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :List[Any] = TFDeiTModelTester(self )
lowercase_ :List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ , lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Optional[Any] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ :Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Dense ) )
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ , lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Tuple = model_class(lowercase )
lowercase_ :Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ :Tuple = [*signature.parameters.keys()]
lowercase_ :Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowercase__ ( self : str ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def lowercase__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Any=False ):
"""simple docstring"""
lowercase_ :Dict = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :List[Any] = TFDeiTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Dict ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
lowercase_ :Optional[int] = self.default_image_processor
lowercase_ :Union[str, Any] = prepare_img()
lowercase_ :Dict = image_processor(images=lowercase , return_tensors="tf" )
# forward pass
lowercase_ :Union[str, Any] = model(**lowercase )
# verify the logits
lowercase_ :Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase )
lowercase_ :Dict = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
| 147
| 1
|
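# Likely Project Euler problem 40 (Champernowne's constant): concatenate
# 1, 2, 3, ... and multiply the digits at positions 1, 10, 100, ..., 1_000_000.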
def solution():
    """Returns the product of the digits d_1 * d_10 * d_100 * ... * d_1000000."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 12
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62
| 0
|
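# Shared pytest configuration for the datasets test suite: registers markers and
# autouse fixtures that sandbox the cache directories and silence progress bars.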
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when SQLAlchemy 2.0 is installed
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 240
|
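# Likely Project Euler problem 12: find the first triangular number with more
# than 500 divisors, counting divisors via the prime factorisation.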
'''simple docstring'''
def count_divisors(n):
    """Counts the divisors of ``n`` from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Returns the first triangle number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 240
| 1
|
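# Maps S3PRL downstream heads (sequence classification, diarization, x-vector)
# onto the matching UniSpeechSAT architectures and saves a HF checkpoint.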
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 233
|
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 233
| 1
|
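# Quickselect: pick a random pivot, partition the list, and recurse into the side
# that contains the k-th smallest element (expected linear time).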
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of ``lst`` using quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11
|
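# Tests for MaskFormer's Swin backbone: hidden-state shapes for the base model
# and feature maps/channels for the backbone against the configured out_features.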
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds( self ):
        pass
    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs( self ):
        pass
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base( self ):
        pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained( self ):
        pass
    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base( self ):
        pass
    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base( self ):
        pass
    def test_model_outputs_equivalence( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                F" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                F" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )
                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    '''simple docstring'''
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , h_seq_len , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process( position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition( arr ):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(result_pipe ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """simple docstring"""
    arr = list(range(10 , 0 , -1 ) )
    print('''Initial List''' )
    print(*arr )
    arr = odd_even_transposition(arr )
    print('''Sorted List\n''' )
    print(*arr )
if __name__ == "__main__":
main()
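# Illustrative note (added; not part of the upstream algorithm): with the
# default input list(range(10, 0, -1)), each of the 10 worker processes holds
# one value, and after the 10 odd/even exchange rounds above the values are
# received back in index order, so the expected output is 1 2 3 ... 10.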
'''simple docstring'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main():
    """simple docstring"""
    message = input('''Enter message: ''' )
    key = input('''Enter key [alphanumeric]: ''' )
    mode = input('''Encrypt/Decrypt [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        mode = '''encrypt'''
        translated = encrypt_message(key , message )
    elif mode.lower().startswith('''d''' ):
        mode = '''decrypt'''
        translated = decrypt_message(key , message )
    print(F"\n{mode.title()}ed message:" )
    print(translated )
def encrypt_message( key , message ):
    """simple docstring"""
    return translate_message(key , message , '''encrypt''' )
def decrypt_message( key , message ):
    """simple docstring"""
    return translate_message(key , message , '''decrypt''' )
def translate_message( key , message , mode ):
    """simple docstring"""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
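# Note (added): replacing the module in sys.modules with a _LazyModule defers
# the heavy torch-backed imports listed in _import_structure until an
# attribute such as WavLMModel is first accessed, keeping the package import cheap.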
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 6_4 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
print(solution())
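# Sanity check (added; hedged illustration): the word value of "SKY" is
# 19 + 11 + 25 = 55 = t(10), a triangular number, so "SKY" counts as a
# triangle word and contributes to the total returned by solution().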
from math import pi, sqrt
def gamma( num: float ) -> float:
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''')
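# Quick examples (added for illustration): since the recursion implements
# gamma(n) = (n - 1) * gamma(n - 1) with gamma(1) = 1 and gamma(0.5) = sqrt(pi),
# gamma(4) evaluates to 3 * 2 * 1 = 6.0 and gamma(2.5) to 1.5 * 0.5 * sqrt(pi).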
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
    mask_time_prob: Optional[float] = field(
        default=0.0_5 , metadata={
            '''help''': (
                '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
                '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
                '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_split_name: Optional[str] = field(
        default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of validation examples to this '''
                '''value if set.'''
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class DataCollatorCTCWithPadding:
    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__( self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
        batch['labels'] = labels
        return batch
class CTCTrainer(Trainer ):
    def training_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
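# Note (added): the division by gradient_accumulation_steps above keeps the
# effective gradient magnitude constant when one batch is split across several
# backward passes; the scaler/apex/deepspeed branches simply route backward()
# through whichever mixed-precision engine is active.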
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
        logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
    # Create and save tokenizer
    chars_to_ignore_regex = F'[{"".join(data_args.chars_to_ignore )}]'
    def remove_special_characters(batch ):
        batch['text'] = re.sub(chars_to_ignore_regex , '' , batch['sentence'] ).lower() + ' '
        return batch
    train_dataset = train_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    def extract_all_chars(batch ):
        all_text = ' '.join(batch['text'] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    vocab_test = eval_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )
    vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict )
    vocab_dict['[PAD]'] = len(vocab_dict )
    with open('vocab.json' , 'w' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
    resampler = torchaudio.transforms.Resample(4_8_0_0_0 , 1_6_0_0_0 )
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array , sampling_rate = torchaudio.load(batch['path'] )
        batch['speech'] = resampler(speech_array ).squeeze().numpy()
        batch['sampling_rate'] = 1_6_0_0_0
        batch['target_text'] = batch['text']
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), F'Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'
        processed_batch = processor(
            audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed_batch )
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    # Metric
    wer_metric = datasets.load_metric('wer' )
    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_val_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    return results
if __name__ == "__main__":
main()
def molarity_to_normality( nfactor: int , moles: float , volume: float ):
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure( volume: float , moles: float , temperature: float ):
    return round(float((moles * 0.0_821 * temperature) / (volume) ) )
def moles_to_volume( pressure: float , moles: float , temperature: float ):
    return round(float((moles * 0.0_821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature( pressure: float , moles: float , volume: float ):
    return round(float((pressure * volume) / (0.0_821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
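# Illustrative usage (added; values are a hedged example, not from the source):
# with the gas constant R = 0.0821 L*atm/(mol*K) hard-coded above,
# moles_to_pressure(volume=0.82, moles=3, temperature=300) evaluates to
# round(73.89 / 0.82) = 90 atm.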
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection: list[int] , item: int ) -> int | None:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection: list[int] , item: int ) -> int | None:
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = sorted(int(item) for item in user_input.split(''','''))
    target = int(input('''Enter a single number to be found in the list:\n'''))
    result = binary_search(collection, target)
if result is None:
print(F"{target} was not found in {collection}.")
else:
print(F"{target} was found at position {result} in {collection}.")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_upernet'''] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""" )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + """\n""" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result) )
        total , correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples, num_correct, k ):
    '''simple docstring'''
    def estimator(n: int, c: int, k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) )
    if isinstance(num_samples, int ):
        num_samples_it = itertools.repeat(num_samples, len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ), int(c ), k ) for n, c in zip(num_samples_it, num_correct )] )
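# Note on the estimator (added; this is the standard pass@k identity, not text
# from this file): the product above equals 1 - C(n - c, k) / C(n, k), the
# probability that at least one of k samples drawn without replacement from n
# candidates (c of which pass) is correct; e.g. n=2, c=1, k=1 gives
# 1 - (1 - 1/2) = 0.5, matching the pass@1 value in the docstring example.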
"""simple docstring"""
import math
def proth( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
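# Reference values (added as a hedged cross-check): the sequence produced
# above starts 3, 5, 9, 13, 17, 25, 33, 41, ..., i.e. numbers of the form
# k * 2**n + 1 with odd k < 2**n, so proth(6) returns 25.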
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas( size: int ) -> list[list[bool]]:
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed( canvas: list[list[bool]] ) -> None:
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run( canvas: list[list[bool]] ) -> list[list[bool]]:
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point( pt: bool , neighbours: list[list[bool]] ) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
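# Rule summary (added for clarity): __judge_point implements Conway's B3/S23
# rules on the neighbourhood sliced out in run() -- a live cell survives with
# 2 or 3 live neighbours and dies otherwise, and a dead cell becomes alive
# with exactly 3 live neighbours.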
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig ):
    model_type = """align_text_model"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig(PretrainedConfig ):
    model_type = """align_vision_model"""
    def __init__( self , num_channels = 3 , image_size = 6_0_0 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2_5_6_0 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("""model_type""" ) == "align":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig(PretrainedConfig ):
    model_type = """align"""
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=6_4_0 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter ):
    '''simple docstring'''
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( max_perimeter = 1000 ):
    '''simple docstring'''
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
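# Known result (added as a hedged cross-check; this is Project Euler problem
# 39): for max_perimeter=1000 the answer is p=840. As a smaller example,
# perimeter 120 already has three solutions: (30, 40, 50), (20, 48, 52) and
# (24, 45, 51).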
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
    def time_input_dim( self ):
'''simple docstring'''
return 32
@property
    def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
    def dummy_tokenizer( self ):
'''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
    def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
    @property
    def dummy_movq_kwargs(self):
        '''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = "a hat"
UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase : Optional[Any] = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : Dict = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
| 52
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
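# Known Project Euler 6 result at n = 100 (assertion mine):
assert solution(100) == 25164150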
| 352
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    '''simple docstring'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi, 2 ) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square(side_length: float) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle(base: float, height: float) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    '''simple docstring'''
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium(base1: float, base2: float, height: float) -> float:
    '''simple docstring'''
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    '''simple docstring'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    '''simple docstring'''
    if not isinstance(sides, int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
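# Standalone identity checks tying a few of the formulas above together
# (assertions mine; each follows from elementary geometry):
from math import isclose

assert isclose(surface_area_hemisphere(2), surface_area_sphere(2) / 2 + area_circle(2))
assert isclose(surface_area_conical_frustum(3, 3, 5), surface_area_cylinder(3, 5))
assert isclose(area_rhombus(6, 8), 4 * area_triangle(3, 4))
assert isclose(area_trapezium(10, 10, 7), area_parallelogram(10, 7))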
| 323
| 0
|
"""simple docstring"""
from math import log2
def lowest_set_bit_index(a: int) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(a , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
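# Float-free cross-check of lowest_set_bit_index via int.bit_length
# (assertion mine): isolating the lowest set bit with a & -a and taking its
# bit length gives the same index without going through log2.
assert all(
    lowest_set_bit_index(x) == (x & -x).bit_length() - 1
    for x in range(1, 1024)
)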
| 16
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
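# Minimal usage sketch for the pipeline above (checkpoint, image path and
# labels are illustrative assumptions, not taken from this file):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier(
#         "path/to/image.jpg",
#         candidate_labels=["cat", "dog", "car"],
#         hypothesis_template="This is a photo of {}.",
#     )
#     # -> list of {"score": ..., "label": ...} sorted by descending score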
| 196
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
lowercase__ : List[str] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowercase__ : Tuple = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowercase__ : Optional[Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowercase__ : List[Any] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowercase__ : Dict = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowercase__ : Optional[int] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowercase__ : List[Any] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowercase__ : List[str] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowercase__ : Dict = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowercase__ : Optional[int] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowercase__ : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowercase__ : Optional[int] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowercase__ : int = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowercase__ : int = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowercase__ : List[Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith('''CompVis'''):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 190
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )

def create_state_space_tree(sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
lowercase__ : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowercase__ : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
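# Cross-check (mine): itertools enumerates the same 4! = 24 permutations
# without explicit backtracking.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24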
| 190
| 1
|
class FlowNetwork:
    """simple docstring"""
    def __init__( self , graph , sources , sinks ):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self , sources , sinks ):
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ):
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''' )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm( self , algorithm ):
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    """simple docstring"""
    def __init__( self , flow_network ):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm( self ):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ):
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''' )
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm( self ):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )

    def push( self , from_index , to_index ):
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel( self , vertex_index ):
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = [0]
__lowerCAmelCase : str = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Union[str, Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[Any] = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
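# The 4-node graph above is a chain 0 -> 1 -> 2 -> 3 with capacities 7, 6, 8,
# so the bottleneck is the middle edge and the script should print
# "maximum flow is 6". A standalone cross-check with a small BFS-based
# Edmonds-Karp (a sketch of mine, not part of the original module):
from collections import deque

def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:
            break  # no augmenting path left
        # bottleneck capacity along the path found
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        # augment flow along the path
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck
    return max_flow

assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6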
| 156
|
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr, size, stride) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
__lowerCAmelCase : List[Any] = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
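# Hand-checkable example for the two helpers above: a 4x4 ramp pooled with
# size=2, stride=2 (expected outputs computed by hand; avgpooling truncates
# via int(), so 2.5 -> 2 and so on):
_sample = np.arange(16).reshape(4, 4)
assert np.array_equal(maxpooling(_sample, size=2, stride=2), [[5, 7], [13, 15]])
assert np.array_equal(avgpooling(_sample, size=2, stride=2), [[2, 4], [10, 12]])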
| 156
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {"num_train_timesteps": 1_0_0_0}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        """simple docstring"""
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )

    def test_inference_steps( self ):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop( self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 111
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]

def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index

def solution(n: int = 10_00) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
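# The search above recomputes the whole sequence on every fibonacci() call,
# which is quadratic overall. An incremental sketch (names mine) that keeps
# only the last two terms reaches the same known Project Euler 25 answer:
def fibonacci_digits_index_fast(n: int) -> int:
    a, b = 0, 1
    index = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(1000) == 4782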
| 111
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig ):
    model_type = '''bert'''

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
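# Quick usage sketch for the two classes above (values illustrative):
#
#     config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     onnx_config = BertOnnxConfig(config, task="default")
#     onnx_config.inputs  # OrderedDict mapping each input name to its dynamic axes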
| 51
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
@require_torch
    def test_double_precision_pad( self ):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
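# Standalone usage of the feature extractor under test (output shape matches
# the integration assertion above; the random waveform is illustrative):
#
#     import numpy as np
#     from transformers import ASTFeatureExtractor
#     extractor = ASTFeatureExtractor()
#     waveform = np.random.randn(16000).astype(np.float32)  # 1 s of audio at 16 kHz
#     features = extractor(waveform, sampling_rate=16000, return_tensors="np")
#     features.input_values.shape  # (1, 1024, 128): frames x mel bins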
| 261
| 0
|
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
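# Tiny hand-traced illustration of the renaming above (toy keys of mine,
# values unused):
_toy_state = {
    "layers.0.moe_layer.experts.0.fc1.weight": 0,
    "layers.0.encoder_attn_layer_norm.weight": 1,
}
assert set(rename_fairseq_keys(_toy_state, expert_idx=3)) == {
    "layers.0.ffn.experts.expert_3.fc1.weight",
    "layers.0.cross_attention_layer_norm.weight",
}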
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 351
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
SCREAMING_SNAKE_CASE_: List[str] =[
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
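
# Note (added for illustration, not in the original file): with the lazy module installed
# in sys.modules, importing MaskFormerConfig from this package pulls in only the
# configuration submodule; heavy dependencies such as torch load on first attribute use.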
def combination_util(arr, n, r, index, data, i):
    """Recursively fill data[] with every combination of size r from arr[]."""
    if index == r:
        # Combination is complete: print it
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size r in arr[] of size n."""
    data = [0] * r
    # Print all combinations using the temporary array data[]
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
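

# Optional cross-check (added for illustration, not in the original): the recursive
# routine enumerates exactly the r-sized combinations that itertools produces.
from itertools import combinations


def all_combinations(arr, r):
    """Same combinations as print_combination, returned as a list instead of printed."""
    return [list(c) for c in combinations(arr, r)]


assert len(all_combinations([10, 20, 30, 40, 50], 3)) == 10  # C(5, 3) == 10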
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
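

# Toy illustration (not part of the original module): a minimal concrete reader. Real
# readers in `datasets.io` wire a packaged builder into `read()`; this sketch simply
# treats `path_or_paths` as already-loaded column data.
class InMemoryDatasetReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        return Dataset.from_dict(self.path_or_paths, features=self.features)


# e.g. InMemoryDatasetReader({"text": ["a", "b"]}).read() -> a 2-row Dataset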
def create_ngram(sentence: str, ngram_size: int) -> list:
    """Create all character n-grams of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
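
    # Example (added for illustration): character n-grams of a short string.
    assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]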
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
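
    # Extra illustrative check (not in the original): run the same filter on a synthetic
    # image so the example works even without ../image_data/lena.jpg on disk.
    import numpy as np

    synthetic = (np.random.rand(64, 64) * 255).astype(uint8)
    assert gaussian_filter(synthetic, 5, sigma=1.2).shape == (60, 60)  # valid convolution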
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Creates train and validation `DataLoader`s for a slice of the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
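

# Worked example (added for illustration): how the two patterns above rewrite keys.
#   "text_branch.sequential.3.weight" -> "text_model.layers.1.linear.weight"  (3 // 3 == 1)
#   "text_projection.0.weight"        -> "text_projection.linear1.weight"
#   "text_projection.2.weight"        -> "text_projection.linear2.weight"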
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
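

# Illustrative usage (not part of the original module); the feature names and path are
# made up for the example:
#
#     features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
#
#     @get_duration
#     def write_dummy(path):
#         generate_example_dataset(path, features, num_examples=10)
#
#     seconds_taken = write_dummy("/tmp/dummy_dataset.arrow")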
import string
def decrypt(message: str) -> None:
    """Brute-force print every Caesar shift of an uppercase message."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
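

def encrypt(message: str, key: int) -> str:
    """Companion helper (added for illustration, not in the original): shift `message`
    forward by `key`, so `decrypt` above recovers it on the line for that same key."""
    translated = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            translated += string.ascii_uppercase[num]
        else:
            translated += symbol
    return translated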
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
def _SCREAMING_SNAKE_CASE ( self ,snake_case = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.enable_attention_slicing(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.vae ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
set_requires_grad(self.unet ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = min(int(num_inference_steps * strength ) ,snake_case )
lowercase : List[Any] = max(num_inference_steps - init_timestep ,0 )
lowercase : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
if not isinstance(snake_case ,torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(snake_case )}" )
lowercase : List[str] = image.to(device=snake_case ,dtype=snake_case )
if isinstance(snake_case ,snake_case ):
lowercase : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case )
]
lowercase : Tuple = torch.cat(snake_case ,dim=0 )
else:
lowercase : List[str] = self.vae.encode(snake_case ).latent_dist.sample(snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase : Any = 0.18_215 * init_latents
lowercase : Dict = init_latents.repeat_interleave(snake_case ,dim=0 )
lowercase : List[str] = randn_tensor(init_latents.shape ,generator=snake_case ,device=snake_case ,dtype=snake_case )
# get latents
lowercase : Optional[int] = self.scheduler.add_noise(snake_case ,snake_case ,snake_case )
lowercase : Optional[Any] = init_latents
return latents
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Dict = self.coca_transform(snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowercase : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = self.feature_extractor.preprocess(snake_case )
lowercase : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
lowercase : List[Any] = self.clip_model.get_image_features(snake_case )
lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case )
lowercase : Tuple = image_embeddings_clip.repeat_interleave(snake_case ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self ,snake_case ,snake_case ,snake_case = None ,snake_case = None ,snake_case = 512 ,snake_case = 512 ,snake_case = 0.6 ,snake_case = 50 ,snake_case = 7.5 ,snake_case = 1 ,snake_case = 0.0 ,snake_case = 100 ,snake_case = None ,snake_case = "pil" ,snake_case = True ,snake_case = 0.8 ,snake_case = 0.1 ,snake_case = 0.1 ,):
'''simple docstring'''
if isinstance(snake_case ,snake_case ) and len(snake_case ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(snake_case )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(snake_case ,torch.Generator ) and batch_size > 1:
lowercase : str = [generator] + [None] * (batch_size - 1)
lowercase : Union[str, Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]]
lowercase : int = """, """.join(snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(snake_case ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowercase : Optional[int] = self.get_image_description(snake_case )
if style_prompt is None:
if len(snake_case ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowercase : str = self.get_image_description(snake_case )
# get prompt text embeddings for content and style
lowercase : List[Any] = self.tokenizer(
snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
lowercase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase : Optional[int] = self.tokenizer(
snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,)
lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase : Optional[Any] = slerp(snake_case ,snake_case ,snake_case )
# duplicate text embeddings for each generation per prompt
lowercase : str = text_embeddings.repeat_interleave(snake_case ,dim=0 )
# set timesteps
lowercase : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase : Tuple = {}
if accepts_offset:
lowercase : int = 1
self.scheduler.set_timesteps(snake_case ,**snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase , lowercase : Optional[int] = self.get_timesteps(snake_case ,snake_case ,self.device )
lowercase : Tuple = timesteps[:1].repeat(snake_case )
# Preprocess image
lowercase : str = preprocess(snake_case ,snake_case ,snake_case )
lowercase : int = self.prepare_latents(
snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
lowercase : List[Any] = preprocess(snake_case ,snake_case ,snake_case )
lowercase : Tuple = self.prepare_latents(
snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case )
lowercase : List[str] = slerp(snake_case ,snake_case ,snake_case )
if clip_guidance_scale > 0:
lowercase : Union[str, Any] = self.get_clip_image_embeddings(snake_case ,snake_case )
lowercase : Optional[int] = self.get_clip_image_embeddings(snake_case ,snake_case )
lowercase : Optional[int] = slerp(
snake_case ,snake_case ,snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase : List[str] = content_text_input.input_ids.shape[-1]
lowercase : Optional[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=snake_case ,return_tensors="""pt""" )
lowercase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase : Any = uncond_embeddings.repeat_interleave(snake_case ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase : str = torch.randn(snake_case ,generator=snake_case ,device="""cpu""" ,dtype=snake_case ).to(
self.device )
else:
lowercase : Optional[int] = torch.randn(snake_case ,generator=snake_case ,device=self.device ,dtype=snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowercase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : str = {}
if accepts_eta:
lowercase : str = eta
# check if the scheduler accepts generator
lowercase : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase : Tuple = generator
with self.progress_bar(total=snake_case ):
for i, t in enumerate(snake_case ):
# expand the latents if we are doing classifier free guidance
lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case )
# predict the noise residual
lowercase : Dict = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowercase , lowercase : str = noise_pred.chunk(2 )
lowercase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase , lowercase : Union[str, Any] = self.cond_fn(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,)
# compute the previous noisy sample x_t -> x_t-1
lowercase : Any = self.scheduler.step(snake_case ,snake_case ,snake_case ,**snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowercase : Optional[Any] = 1 / 0.18_215 * latents
lowercase : Any = self.vae.decode(snake_case ).sample
lowercase : Optional[Any] = (image / 2 + 0.5).clamp(0 ,1 )
lowercase : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase : List[str] = self.numpy_to_pil(snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case ,nsfw_content_detected=snake_case )
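# For reference, a minimal sketch of the `slerp` helper used above (the name
# `slerp_sketch`, the `dot_threshold` fallback, and the exact signature are
# assumptions for illustration; the real helper may differ in detail):
def slerp_sketch(t, v0, v1, dot_threshold=0.9_995):
    # cosine of the angle between the two (flattened) embedding tensors
    dot = torch.sum(v0 * v1) / (torch.norm(v0) * torch.norm(v1))
    if torch.abs(dot) > dot_threshold:
        # nearly parallel vectors: fall back to plain linear interpolation
        return (1 - t) * v0 + t * v1
    theta = torch.acos(dot)
    sin_theta = torch.sin(theta)
    # standard spherical interpolation formula
    return (torch.sin((1 - t) * theta) / sin_theta) * v0 + (torch.sin(t * theta) / sin_theta) * v1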
| 20
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
lowercase__ = 42 # Cache store of keys
lowercase__ = 42 # References of the keys in cache
lowercase__ = 10 # Maximum capacity of cache
def __init__( self : Dict ,lowercase_ : int ):
lowerCAmelCase__ : str = deque()
lowerCAmelCase__ : Any = set()
if not n:
lowerCAmelCase__ : Optional[Any] = sys.maxsize
elif n < 0:
            raise ValueError('''n should be an integer greater than or equal to 0.''' )
else:
lowerCAmelCase__ : int = n
def __lowerCAmelCase ( self : str ,lowercase_ : T ):
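        # LRU policy: on a miss with the cache full, evict from the right end of
        # the deque (least recently used); on a hit, remove the key and re-add it
        # at the left end so the deque always stays in recency order.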
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase__ : Any = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __lowerCAmelCase ( self : int ):
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
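# A note on complexity: `deque.remove` above is O(n), so every cache hit costs
# linear time. An OrderedDict-backed variant gets O(1) amortized per `refer`;
# this is an illustrative sketch, not part of the original module:
from collections import OrderedDict


class LRUCacheOrdered:
    """Minimal O(1)-per-operation LRU sketch built on OrderedDict."""

    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # hit: mark as most recently used
        else:
            if len(self.store) >= self.capacity:
                self.store.popitem(last=False)  # evict the least recently used
            self.store[key] = None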
| 106
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
_UpperCamelCase = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
_UpperCamelCase = '''▁'''
# Segments (not really needed)
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = 3
_UpperCamelCase = 4
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = """left"""
__SCREAMING_SNAKE_CASE = XLNetTokenizer
def __init__(self , __a=None , __a=None , __a=False , __a=True , __a=False , __a="<s>" , __a="</s>" , __a="<unk>" , __a="<sep>" , __a="<pad>" , __a="<cls>" , __a="<mask>" , __a=["<eop>", "<eod>"] , **__a , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
vocab_file=lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = False if not self.vocab_file else True
def UpperCamelCase__ (self , __a , __a = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ (self , __a , __a = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ (self , __a , __a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
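# Schematic layout produced by the special-token helpers above for a pair of
# sequences (token contents are placeholders, not real ids):
#   tokens:    a_1 ... a_n </s> b_1 ... b_m </s> <cls>
#   segments:    0 ...   0    0   1 ...   1    1     2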
| 369
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 335
| 0
|
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] , lowercase : Any , lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
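    # two-way merge of input_list[low:mid] and input_list[mid:high + 1]:
    # repeatedly pop the smaller head of the two sublists, then append
    # whatever remains once one side is exhausted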
lowerCamelCase_ = []
lowerCamelCase_ , lowerCamelCase_ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowerCamelCase_ = result + left + right
return input_list
def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] ):
'''simple docstring'''
if len(lowercase ) <= 1:
return input_list
lowerCamelCase_ = list(lowercase )
# iteration for two-way merging
lowerCamelCase_ = 2
while p <= len(lowercase ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowercase ) , lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = i + p - 1
lowerCamelCase_ = (low + high + 1) // 2
lowerCamelCase_ = merge(lowercase , lowercase , lowercase , lowercase )
# final merge of last two parts
if p * 2 >= len(lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = merge(lowercase , 0 , lowercase , len(lowercase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowerCamelCase : int = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowerCamelCase : List[Any] = []
else:
lowerCamelCase : Tuple = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
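# Illustrative trace of the bottom-up merging above for [5, 9, 8, 7, 1, 2]:
#   p=2: [5, 9] [7, 8] [1, 2]   (adjacent pairs merged)
#   p=4: [5, 7, 8, 9] [1, 2]    (runs of two merged)
#   final merge -> [1, 2, 5, 7, 8, 9]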
| 204
|
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( lowercase , lowercase , lowercase ) -> str:
'''simple docstring'''
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(lowercase , lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(lowercase , lowercase , lowercase )
count += _in_place_quick_sort(lowercase , lowercase , p - 1 )
count += _in_place_quick_sort(lowercase , p + 1 , lowercase )
return count
def A ( lowercase , lowercase , lowercase ) -> int:
'''simple docstring'''
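    # Lomuto-style partition with a random pivot: the pivot is swapped into
    # a[end], every element smaller than it is moved to the left side, the
    # pivot lands at its final sorted position, and the number of comparisons
    # performed is returned alongside that position.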
UpperCamelCase = 0
UpperCamelCase = randint(lowercase , lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(lowercase , lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
_UpperCAmelCase : Union[str, Any] = TemporaryFile()
_UpperCAmelCase : List[Any] = 100 # 100 elements are to be sorted
_UpperCAmelCase ,_UpperCAmelCase : Any = 0, 1 # mean and standard deviation
_UpperCAmelCase : Any = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
_UpperCAmelCase : Any = np.load(outfile)
_UpperCAmelCase : str = len(M) - 1
_UpperCAmelCase : List[str] = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 222
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(
lowerCamelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __snake_case ( lowerCamelCase__ ):
def UpperCAmelCase__ ( self , snake_case__ ) -> Tuple:
'''simple docstring'''
if self.framework == "tf":
UpperCAmelCase : int =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase : List[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =self.get_masked_index(_snake_case )
UpperCAmelCase : str =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def UpperCAmelCase__ ( self , snake_case__ ) -> int:
'''simple docstring'''
if isinstance(_snake_case , _snake_case ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_snake_case )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__=None , **snake_case__ ) -> Dict:
'''simple docstring'''
if return_tensors is None:
UpperCAmelCase : List[str] =self.framework
UpperCAmelCase : List[str] =self.tokenizer(_snake_case , return_tensors=_snake_case )
self.ensure_exactly_one_mask_token(_snake_case )
return model_inputs
def UpperCAmelCase__ ( self , snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple =self.model(**_snake_case )
UpperCAmelCase : List[str] =model_inputs['''input_ids''']
return model_outputs
def UpperCAmelCase__ ( self , snake_case__ , snake_case__=5 , snake_case__=None ) -> Dict:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase : Optional[int] =target_ids.shape[0]
UpperCAmelCase : str =model_outputs['''input_ids'''][0]
UpperCAmelCase : Tuple =model_outputs['''logits''']
if self.framework == "tf":
UpperCAmelCase : str =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase : List[Any] =outputs.numpy()
UpperCAmelCase : Any =outputs[0, masked_index, :]
UpperCAmelCase : Optional[int] =stable_softmax(_snake_case , axis=-1 )
if target_ids is not None:
UpperCAmelCase : List[str] =tf.gather_nd(tf.squeeze(_snake_case , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase : Optional[int] =tf.expand_dims(_snake_case , 0 )
UpperCAmelCase : Union[str, Any] =tf.math.top_k(_snake_case , k=_snake_case )
UpperCAmelCase , UpperCAmelCase : Dict =topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase : List[str] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase : Tuple =outputs[0, masked_index, :]
UpperCAmelCase : Dict =logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase : Optional[int] =probs[..., target_ids]
UpperCAmelCase , UpperCAmelCase : List[str] =probs.topk(_snake_case )
UpperCAmelCase : Union[str, Any] =[]
UpperCAmelCase : Optional[Any] =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase : Dict =[]
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase : str =input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase : Optional[Any] =target_ids[p].tolist()
UpperCAmelCase : Optional[int] =p
# Filter padding out:
UpperCAmelCase : List[Any] =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase : List[str] =self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
UpperCAmelCase : List[str] ={'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_snake_case )
result.append(_snake_case )
if single_mask:
return result[0]
return result
def UpperCAmelCase__ ( self , snake_case__ , snake_case__=None ) -> Optional[int]:
'''simple docstring'''
if isinstance(_snake_case , _snake_case ):
UpperCAmelCase : Tuple =[targets]
try:
UpperCAmelCase : Optional[Any] =self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase : int ={}
UpperCAmelCase : int =[]
for target in targets:
UpperCAmelCase : Optional[Any] =vocab.get(_snake_case , _snake_case )
if id_ is None:
UpperCAmelCase : Optional[Any] =self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['''input_ids''']
if len(_snake_case ) == 0:
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
UpperCAmelCase : Optional[int] =input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
UpperCAmelCase : Optional[int] =list(set(_snake_case ) )
if len(_snake_case ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
UpperCAmelCase : Any =np.array(_snake_case )
return target_ids
def UpperCAmelCase__ ( self , snake_case__=None , snake_case__=None ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] ={}
if targets is not None:
UpperCAmelCase : Dict =self.get_target_ids(_snake_case , _snake_case )
UpperCAmelCase : Union[str, Any] =target_ids
if top_k is not None:
UpperCAmelCase : Union[str, Any] =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , snake_case__ , *snake_case__ , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =super().__call__(_snake_case , **_snake_case )
if isinstance(_snake_case , _snake_case ) and len(_snake_case ) == 1:
return outputs[0]
return outputs
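# Typical usage of the fill-mask pipeline defined above (the model name is
# illustrative; any masked-LM checkpoint with a mask token works):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
# each candidate comes back with score / token / token_str / sequence fields
print(unmasker("Paris is the [MASK] of France.", top_k=3))
# `targets` restricts scoring to specific candidate tokens
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"]))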
| 357
|
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__snake_case = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
__snake_case = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def lowerCAmelCase_ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase : List[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase : Any ='''rougeLsum'''
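    # rougeLsum is computed sentence-wise (sentences are expected to be
    # newline-separated), so newline_sep=True should yield a higher score than
    # scoring each summary as one long sentence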
UpperCAmelCase : Optional[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k]
UpperCAmelCase : List[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCAmelCase_ ( )-> Any:
'''simple docstring'''
UpperCAmelCase : str =['''rouge1''', '''rouge2''', '''rougeL''']
UpperCAmelCase : int =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase )
UpperCAmelCase : Tuple =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase )
assert score_sep == score_no_sep
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase : int =[
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
UpperCAmelCase : Any =[
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase ) == calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase )
def lowerCAmelCase_ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =[
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
UpperCAmelCase : Optional[Any] =[
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
UpperCAmelCase : Optional[int] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=['''rougeLsum'''] , newline_sep=__lowerCAmelCase )['''rougeLsum''']
UpperCAmelCase : int =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def lowerCAmelCase_ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[Any] =Path('''examples/seq2seq/test_data/wmt_en_ro''' )
UpperCAmelCase : Tuple =calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase : Dict =calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
| 78
| 0
|
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : int = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
    - **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Tuple = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : List[str]="binary" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str="warn" , ):
SCREAMING_SNAKE_CASE_: List[str] = recall_score(
lowerCAmelCase__ , lowerCAmelCase__ , labels=lowerCAmelCase__ , pos_label=lowerCAmelCase__ , average=lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , zero_division=lowerCAmelCase__ , )
return {"recall": float(lowerCAmelCase__) if score.size == 1 else score}
| 13
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
def __init__( self ,snake_case ,):
'''simple docstring'''
lowercase : Any = parent
lowercase : Tuple = 13
lowercase : str = 7
lowercase : Dict = True
lowercase : Dict = True
lowercase : str = True
lowercase : List[str] = True
lowercase : int = True
lowercase : Union[str, Any] = False
lowercase : Dict = False
lowercase : List[Any] = False
lowercase : List[Any] = 2
lowercase : Optional[Any] = 99
lowercase : int = 0
lowercase : Tuple = 32
lowercase : int = 2
lowercase : Tuple = 4
lowercase : List[Any] = 0.1
lowercase : Tuple = 0.1
lowercase : List[Any] = 512
lowercase : int = 16
lowercase : Dict = 2
lowercase : int = 0.02
lowercase : Union[str, Any] = 3
lowercase : Any = 4
lowercase : List[Any] = """last"""
lowercase : Tuple = True
lowercase : List[Any] = None
lowercase : Any = 0
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
lowercase : Tuple = None
if self.use_input_lengths:
lowercase : List[str] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Tuple = None
if self.use_token_type_ids:
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
lowercase : List[str] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase : str = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
lowercase : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : str = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Tuple = TFFlaubertModel(config=snake_case )
lowercase : str = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowercase : Optional[Any] = model(snake_case )
lowercase : List[Any] = [input_ids, input_mask]
lowercase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : List[Any] = TFFlaubertWithLMHeadModel(snake_case )
lowercase : Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Tuple = TFFlaubertForQuestionAnsweringSimple(snake_case )
lowercase : Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowercase : Tuple = model(snake_case )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Union[str, Any] = TFFlaubertForSequenceClassification(snake_case )
lowercase : str = {"""input_ids""": input_ids, """lengths""": input_lengths}
lowercase : str = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Any = self.num_labels
lowercase : List[str] = TFFlaubertForTokenClassification(config=snake_case )
lowercase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,):
'''simple docstring'''
lowercase : Any = self.num_choices
lowercase : Dict = TFFlaubertForMultipleChoice(config=snake_case )
lowercase : Any = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Optional[Any] = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Dict = tf.tile(tf.expand_dims(snake_case ,1 ) ,(1, self.num_choices, 1) )
lowercase : Union[str, Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase : int = model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : int = config_and_inputs
lowercase : List[str] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Dict= (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_a : Optional[Any]= (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_a : Any= (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_a : Tuple= False
_a : int= False
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = TFFlaubertModelTester(self )
lowercase : List[Any] = ConfigTester(self ,config_class=snake_case ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = TFFlaubertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
lowercase : int = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
lowercase : Dict = model(snake_case )[0]
lowercase : Union[str, Any] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,snake_case )
# compare the actual values for a slice.
lowercase : Tuple = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 20
| 0
|
import logging
from transformers import PretrainedConfig
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''bertabs'''
def __init__( self: List[str] , UpperCamelCase_: Dict=30_522 , UpperCamelCase_: Union[str, Any]=512 , UpperCamelCase_: Optional[int]=6 , UpperCamelCase_: int=512 , UpperCamelCase_: Optional[int]=8 , UpperCamelCase_: List[Any]=512 , UpperCamelCase_: Tuple=0.2 , UpperCamelCase_: List[Any]=6 , UpperCamelCase_: Tuple=768 , UpperCamelCase_: List[Any]=8 , UpperCamelCase_: Union[str, Any]=2_048 , UpperCamelCase_: str=0.2 , **UpperCamelCase_: Any , ) -> List[str]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = vocab_size
lowercase__ = max_pos
lowercase__ = enc_layers
lowercase__ = enc_hidden_size
lowercase__ = enc_heads
lowercase__ = enc_ff_size
lowercase__ = enc_dropout
lowercase__ = dec_layers
lowercase__ = dec_hidden_size
lowercase__ = dec_heads
lowercase__ = dec_ff_size
lowercase__ = dec_dropout
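# Minimal instantiation sketch; the class name `BertAbsConfig` and the keyword
# names are assumptions inferred from the `bertabs` model_type and the
# attributes assigned in __init__ above (values mirror the defaults):
config = BertAbsConfig(vocab_size=30_522, max_pos=512, enc_layers=6, dec_layers=6)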
| 93
|
from collections.abc import Sequence
from queue import Queue
class _a :
def __init__( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Dict=None ) -> Tuple:
"""simple docstring"""
lowercase__ = start
lowercase__ = end
lowercase__ = val
lowercase__ = (start + end) // 2
lowercase__ = left
lowercase__ = right
def __repr__( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class _a :
def __init__( self: Any , UpperCamelCase_: Sequence , UpperCamelCase_: Any ) -> List[str]:
"""simple docstring"""
lowercase__ = collection
lowercase__ = function
if self.collection:
lowercase__ = self._build_tree(0 , len(UpperCamelCase_ ) - 1 )
def lowerCamelCase_ ( self: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
self._update_tree(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: str , UpperCamelCase_: int , UpperCamelCase_: List[str] ) -> Optional[Any]:
"""simple docstring"""
return self._query_range(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict ) -> str:
"""simple docstring"""
if start == end:
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.collection[start] )
lowercase__ = (start + end) // 2
lowercase__ = self._build_tree(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self._build_tree(mid + 1 , UpperCamelCase_ )
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.fn(left.val , right.val ) , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] ) -> Dict:
"""simple docstring"""
if node.start == i and node.end == i:
lowercase__ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
self._update_tree(node.right , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.fn(node.left.val , node.right.val )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Dict ) -> List[Any]:
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
if self.root is not None:
lowercase__ = Queue()
queue.put(self.root )
while not queue.empty():
lowercase__ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 93
| 1
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]" )
UpperCAmelCase__ = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(__A )
DownloadCommand.register_subcommand(__A )
EnvironmentCommand.register_subcommand(__A )
RunCommand.register_subcommand(__A )
ServeCommand.register_subcommand(__A )
UserCommands.register_subcommand(__A )
AddNewModelCommand.register_subcommand(__A )
AddNewModelLikeCommand.register_subcommand(__A )
LfsCommands.register_subcommand(__A )
PTtoTFCommand.register_subcommand(__A )
# Let's go
UpperCAmelCase__ = parser.parse_args()
if not hasattr(__A, "func" ):
parser.print_help()
exit(1 )
# Run
UpperCAmelCase__ = args.func(__A )
service.run()
if __name__ == "__main__":
main()
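# Installed as a console script; e.g. `transformers-cli env` or
# `transformers-cli download <model>` dispatch to the subcommands registered above.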
| 65
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowerCamelCase : Tuple = 'Create a default config file for Accelerate with only a few flags set.'
def lowercase_ ( _UpperCAmelCase="no" , _UpperCAmelCase = default_json_config_file , _UpperCAmelCase = False ):
"""simple docstring"""
A_ : str = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
A_ : Optional[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
A_ : str = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
A_ : int = torch.cuda.device_count()
A_ : int = num_gpus
A_ : Tuple = False
if num_gpus > 1:
A_ : Optional[int] = '''MULTI_GPU'''
else:
A_ : Union[str, Any] = '''NO'''
elif is_xpu_available() and use_xpu:
A_ : str = torch.xpu.device_count()
A_ : Optional[int] = num_xpus
A_ : List[str] = False
if num_xpus > 1:
A_ : Any = '''MULTI_XPU'''
else:
A_ : Optional[Any] = '''NO'''
elif is_npu_available():
A_ : Union[str, Any] = torch.npu.device_count()
A_ : Optional[int] = num_npus
A_ : Union[str, Any] = False
if num_npus > 1:
A_ : List[str] = '''MULTI_NPU'''
else:
A_ : Tuple = '''NO'''
else:
A_ : Union[str, Any] = 0
A_ : str = True
A_ : str = 1
A_ : List[Any] = '''NO'''
A_ : Dict = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = parser.add_parser('''default''' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=_UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=_UpperCAmelCase , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=_UpperCAmelCase )
return parser
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : str = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"""accelerate configuration saved at {config_file}""" )
| 167
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__A : List[str] = 'src/diffusers'
__A : Optional[int] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
__A : str = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
__A : Optional[int] = spec.loader.load_module()
def __UpperCamelCase ( _A : Dict , _A : Any ) ->int:
"""simple docstring"""
return line.startswith(_A ) or len(_A ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , _A ) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to `code`, taking care of extra indentation."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """Check every file in the repo and raise (or fix) when a copy is inconsistent."""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 49
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938,
            5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916,
            508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129,
            1172,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ ={"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
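

# A minimal round-trip sketch (not part of the original test file); it reuses the
# sentencepiece fixture (SAMPLE_VOCAB) from above.
def _roundtrip_demo():
    tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("This is a test"))
    # in-vocabulary pieces survive the round trip unchanged; unknown ids come back as "<unk>"
    assert tokenizer.convert_ids_to_tokens(ids) == ["▁This", "▁is", "▁a", "▁t", "est"]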
| 49
| 1
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the number of ways `pence` pence can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
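    # A small hand-checked case (not part of the original file): 5 pence can be formed in
    # 4 ways with these coins: 5; 2+2+1; 2+1+1+1; 1+1+1+1+1.
    assert solution(5) == 4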
| 63
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
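

# A hedged usage sketch (not part of the original file); the image path is hypothetical
# and the callable Tool interface is assumed from transformers' agents API:
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("cat.png"), "What animal is this?")
#   print(answer)  # e.g. "cat"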
| 335
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, numpy arrays (numpify=True) or PyTorch tensors (torchify=True)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_center_crop"))
        self.assertTrue(hasattr(image_processor, "center_crop"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_center_crop"))
        self.assertTrue(hasattr(image_processor, "center_crop"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
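

# A minimal usage sketch (not part of the original test file); the random 32x32 image and
# the constructor arguments are assumptions for illustration.
def _preprocess_demo():
    image = Image.fromarray(np.random.randint(255, size=(32, 32, 3), dtype=np.uint8))
    image_processor = ChineseCLIPImageProcessor(size={"height": 224, "width": 224}, do_center_crop=False)
    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    # a batch of one RGB image, resized to 224x224
    assert tuple(pixel_values.shape) == (1, 3, 224, 224)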
| 208
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
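
# Example invocation (not part of the original script); the tokenizer and checkpoint
# names are illustrative only:
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers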
| 208
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs) -> None:
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
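

# A minimal sketch (not part of the original file) of what `pad` does for a batch of
# variable-length 1-D features under the "longest" strategy; values are arbitrary.
def _padding_demo():
    batch = [np.array([0.1, 0.2, 0.3]), np.array([0.4])]
    max_length = max(len(x) for x in batch)
    padded = [np.pad(x, (0, max_length - len(x)), constant_values=0.0) for x in batch]
    attention_mask = [np.pad(np.ones(len(x), dtype=np.int32), (0, max_length - len(x))) for x in batch]
    # padded -> [[0.1, 0.2, 0.3], [0.4, 0.0, 0.0]]; attention_mask -> [[1, 1, 1], [1, 0, 0]]
    return padded, attention_mask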
| 5
|
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: return the best value and the fraction taken of each item."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
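    # A worked example (not part of the original file): values [60, 100, 120] with weights
    # [10, 20, 30] and capacity 50 -> take items 1 and 2 whole plus 2/3 of item 3.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240 and fractions == [1, 1, 2 / 3]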
| 78
| 0
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 340
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
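

# A toy illustration (not part of the original file) of the residual merging done in
# `forward`: down-block residuals are summed element-wise across controlnets and the
# mid-block residual is accumulated. Tensor shapes here are arbitrary.
def _merge_demo():
    down_a = [torch.ones(1, 4, 8, 8) for _ in range(3)]  # residuals from controlnet 1
    down_b = [torch.ones(1, 4, 8, 8) for _ in range(3)]  # residuals from controlnet 2
    merged = [prev + curr for prev, curr in zip(down_a, down_b)]
    assert all(torch.equal(m, torch.full((1, 4, 8, 8), 2.0)) for m in merged)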
| 340
| 1
|
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
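

# A minimal round-trip sketch (not part of the original test file): offload a state dict
# to disk, then read one tensor back through the generated index.
def _offload_roundtrip_demo():
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        loader = OffloadedWeightsLoader(save_folder=tmp_dir)
        assert torch.equal(loader["linear1.weight"], model.state_dict()["linear1.weight"])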
| 93
|
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at `number` arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count the starting numbers below `number` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
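    # A hand-checked chain (not part of the original file): 44 -> 32 -> 13 -> 10 -> 1,
    # so next_number(44) == 4**2 + 4**2 == 32 and chain(44) is True (the chain ends at 1).
    assert next_number(44) == 32 and chain(44) is True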
| 93
| 1
|
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; the tail of the longer one is appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
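    # Quick check (not part of the original file): characters interleave and the tail of
    # the longer string is appended, so the printed value is "AXBYZ".
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"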
| 312
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
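# Minimal usage sketch (illustrative; downloads the hosted checkpoint named above):
# tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# ids = tokenizer.build_inputs_with_special_tokens([100, 101])  # -> [<s>] + ids + [</s>]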
| 312
| 1
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word's index overflows - delete all remaining chars of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word's index overflows - delete all remaining chars of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
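    # Illustrative check (not part of the original module): the edit distance
    # between "kitten" and "sitting" is 3 (k->s, e->i, insert g).
    assert min_distance_up_bottom("kitten", "sitting") == 3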
| 49
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
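    # Example invocation (illustrative; script name and paths are placeholders):
    # python convert_lxmert_tf_checkpoint.py --tf_checkpoint_path ./ckpt \
    #     --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin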
| 49
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
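# To run just these tests (illustrative; the path assumes the usual transformers repo layout):
# python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py -q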
| 325
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    '''simple docstring'''
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval(self, distributed_port):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_as_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_as_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
        # distributed training
        world_size = dist.get_world_size(group=self.process_group)
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
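# Summary of the retrieve() flow above (comment only):
# 1. every worker gathers its query vectors to the main rank over the gloo group,
# 2. the main rank performs a single index lookup for the concatenated batch,
# 3. document ids and embeddings are chunked and scattered back to each worker.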
| 325
| 1
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range, )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , *args , gradient_checkpointing=False ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization( self , config , *args ):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , *args ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        tokenizer.padding_side = 'left'
        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        inputs = tokenizer(sentences, return_tensors='pt', padding=True)
        input_ids = inputs['input_ids'].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs['attention_mask'].to(torch_device), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is', return_tensors='pt').to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
            ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
            ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
            ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
            ' more than 800,000 deaths.'
        )
        self.assertEqual(output_str, expected_output_str)
| 208
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(f'Save vocabulary to {pytorch_vocab_dump_path}')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(f'Save dataset to {pytorch_dataset_dump_path}')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f'Building PyTorch model from configuration: {config}')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 208
| 1
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
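    # For reference, Project Euler 205's published answer is 0.5731441: Peter's
    # nine 4-sided dice beat Colin's six 6-sided dice about 57.3% of the time.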
| 189
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
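    # Illustrative check (not part of the original module): 8 = 0b1000,
    # so its highest set bit sits at (1-based) position 4.
    assert get_highest_set_bit_position(8) == 4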
| 189
| 1
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , _SCREAMING_SNAKE_CASE )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = os.path.join(_SCREAMING_SNAKE_CASE , """test_file.py""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as _tmp_file:
_tmp_file.write(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE__ = get_imports(_SCREAMING_SNAKE_CASE )
assert parsed_imports == ["os"]
| 165
|
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
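    # Worked example (illustrative): discounting [100, 100] at 10% gives
    # 100 / 1.1**0 + 100 / 1.1**1 = 190.909..., rounded to 190.91.
    assert present_value(0.1, [100, 100]) == 190.91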
| 341
| 0
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__( self , max_length , vocab_size , d_model , dropout_rate , num_layers , num_heads , d_kv , d_ff , feed_forward_proj , is_decoder=False , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward( self , encoder_input_tokens , encoder_inputs_mask ) -> Tuple:
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
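# Shape sketch (illustrative hyperparameters, not from the original file):
# encoder = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#     dropout_rate=0.1, num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#     feed_forward_proj="gated-gelu")
# (batch, seq) token ids plus a (batch, seq) mask go in; a (batch, seq, d_model)
# encoding and the unchanged mask come back out.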
| 371
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
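    # For reference (Project Euler 30), the qualifying numbers are 4150, 4151,
    # 54748, 92727, 93084 and 194979, so the printed sum should be 443839.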
| 251
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 312
|
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
| 1
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Loads a fairseq OPT checkpoint and renames/splits keys to match the HF layout."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : str , __snake_case : Tuple , __snake_case : Optional[int]=None ) -> List[Any]:
__A : Any = load_checkpoint(SCREAMING_SNAKE_CASE_ )
if config is not None:
__A : Any = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
__A : Dict = OPTConfig()
__A : int = OPTModel(SCREAMING_SNAKE_CASE_ ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check results
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
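
# A hedged usage sketch (the paths and config id below are placeholders, not
# values from this repository): the converter can also be driven
# programmatically instead of through argparse.
#
#     state_dict = load_checkpoint("/path/to/metaseq/restored.pt")
#     convert_opt_checkpoint(
#         "/path/to/metaseq/restored.pt",
#         "/path/to/output_dir",
#         config="facebook/opt-350m",
#     )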
| 366
|
"""
Generic utilities
"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
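
# A minimal usage sketch of the descriptor above (`Circle` is a hypothetical
# example class, not part of this module): the wrapped method runs once per
# instance, after which the value is served from `__cached_<name>`.
#
#     class Circle:
#         def __init__(self, radius):
#             self.radius = radius
#
#         @cached_property
#         def area(self):
#             return 3.14159 * self.radius**2
#
#     c = Circle(2.0)
#     c.area  # computed on first access
#     c.area  # returned from the cache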
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
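
# A quick sketch of the two converters above, assuming only numpy is
# installed; framework tensors and nested containers all collapse to plain
# Python / numpy objects.
#
#     to_py_obj({"a": np.arange(3), "b": [np.float32(1.5)]})
#     # -> {'a': [0, 1, 2], 'b': [1.5]}
#     to_numpy([[1, 2], [3, 4]]).shape
#     # -> (2, 2)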
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
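
# A minimal sketch of subclassing ModelOutput (the field names are
# illustrative, not from a real model): instances behave both like
# dataclasses and like ordered, read-mostly dicts.
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         logits: Optional[np.ndarray] = None
#         hidden_states: Optional[np.ndarray] = None
#
#     out = ToyOutput(logits=np.zeros(2))
#     out.logits is out["logits"]  # True: attribute and key access agree
#     out.to_tuple()               # only the fields that were actually set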
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument of tokenizers."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument of tokenizers and processors."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
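
# A small sketch of ContextManagers in action: it enters a whole list of
# context managers through one ExitStack, which is handy when the list is
# built dynamically (`tag` below is a throwaway example).
#
#     import contextlib
#
#     @contextlib.contextmanager
#     def tag(name):
#         print(f"enter {name}")
#         yield
#         print(f"exit {name}")
#
#     with ContextManagers([tag("a"), tag("b")]):
#         pass  # both managers are active here; they exit in reverse order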
def can_return_loss(model_class):
    """Check if a given model can return loss, by inspecting its signature."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model, by inspecting its signature."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
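
# Example: keys of nested mappings are joined with the delimiter.
#
#     flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3})
#     # -> {'a.b': 1, 'a.c.d': 2, 'e': 3}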
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` that works on torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` that works on torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` that works on torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` that works on torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
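
# The five helpers above dispatch on the tensor type, so one call site works
# for numpy, torch, TensorFlow and JAX inputs alike. A numpy-only sketch:
#
#     x = np.ones((1, 3, 4))
#     transpose(x).shape        # (4, 3, 1)
#     reshape(x, (3, 4)).shape  # (3, 4)
#     squeeze(x, axis=0).shape  # (3, 4)
#     expand_dims(x, 0).shape   # (1, 1, 3, 4)
#     tensor_size(x)            # 12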
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 190
| 0
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 325
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Returns the number of hybrid-integers p**q * q**p (with distinct primes p < q)
    that are less than or equal to base**degree.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
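
# A quick sanity check of the sieve above:
#
#     calculate_prime_numbers(30)
#     # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#
# `solution(base, degree)` then counts prime pairs p < q with
# q*log2(p) + p*log2(q) <= degree*log2(base), i.e. hybrid integers
# p**q * q**p not exceeding base**degree.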
| 325
| 1
|
""" XLM-RoBERTa configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an XLM-RoBERTa model.
    """

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 187
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
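
# A minimal sketch of using these criteria outside the tests (the model and
# inputs are hypothetical); `stopping_criteria` is the matching kwarg of
# `model.generate`:
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=32), MaxTimeCriteria(max_time=2.0)]
#     )
#     # outputs = model.generate(input_ids, stopping_criteria=criteria)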
| 187
| 1
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
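
# A hedged wiring sketch (the dataset and function names below are
# placeholders from a typical SQuAD script, not defined here):
# `post_process_function` maps raw start/end logits back to answer strings
# before `compute_metrics` scores them.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()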
| 189
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 189
| 1
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 94
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 94
| 1
|