import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
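# Illustrative usage sketch (not part of the file above; assumes the
# `transformers` and `tensorflow` packages plus network access to the
# "facebook/regnet-y-040" checkpoint named in the docstring constants):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image
#     predicted_class = int(tf.argmax(model(**inputs).logits, axis=-1))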
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Finds the position of the first mismatch in the text window starting at
        `current_pos`, scanning the pattern right to left; -1 means a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = mismatch_index - match_index  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
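# Sanity check (illustrative): with the inputs above, "AB" occurs in "ABAABA"
# at indices 0 and 3, so the script prints:
#
#     Pattern found in following positions:
#     [0, 3]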
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
    to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed
    precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
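# Illustrative wiring (not part of the file above; in practice
# `Accelerator.prepare` builds this wrapper for you, so the manual construction
# below is only a sketch):
#
#     from torch.optim import SGD
#     from torch.optim.lr_scheduler import StepLR
#
#     optimizer = SGD(model.parameters(), lr=0.1)
#     scheduler = AcceleratedScheduler(StepLR(optimizer, step_size=30), optimizer)
#     ...
#     scheduler.step()  # only advances when the wrapped optimizer actually stepped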
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
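# These tests are meant to be run from the root of the accelerate repository,
# e.g. (illustrative command; the test-file path is an assumption):
#
#     python -m pytest tests/test_examples.py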
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
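# The `offline` helper above is a context manager; direct use mirrors the tests
# (illustrative, assumes the datasets test utilities are importable):
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # any requests-based network call in this block raises ConnectionError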
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    # convert to km/h first, then to the target unit
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
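# Worked examples (values follow from the two tables above):
#
#     >>> convert_speed(100, "km/h", "m/s")
#     27.778
#     >>> convert_speed(100, "km/h", "mph")
#     62.137
#     >>> convert_speed(100, "knot", "m/s")
#     51.444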
import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
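# Running these tests (illustrative; `@slow`-marked tests are gated behind the
# RUN_SLOW flag in the transformers test suite):
#
#     RUN_SLOW=1 python -m pytest tests/models/deit/test_modeling_deit.py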
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact: the API URL redirects to the real download location."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
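# Invocation sketch (this script corresponds to `utils/get_ci_error_statistics.py`
# in the transformers repository; the run id and token below are placeholders):
#
#     python utils/get_ci_error_statistics.py \
#         --workflow_run_id 123456789 \
#         --output_dir ci_artifacts \
#         --token $GITHUB_TOKEN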
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = config_and_inputs
__SCREAMING_SNAKE_CASE :Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Tuple = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE :str = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=37 )
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :str = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__SCREAMING_SNAKE_CASE :List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense ) )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :str = model_class(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :List[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :str = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( ) -> Tuple:
__SCREAMING_SNAKE_CASE :Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
__SCREAMING_SNAKE_CASE :int = self.default_image_processor
__SCREAMING_SNAKE_CASE :str = prepare_img()
__SCREAMING_SNAKE_CASE :int = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='''tf''' )
# forward pass
__SCREAMING_SNAKE_CASE :int = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
__SCREAMING_SNAKE_CASE :Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4 ) )
| 498
| 0
|
from collections.abc import Callable
def __UpperCamelCase ( _A , _A , _A ):
lowerCAmelCase_ = a
lowerCAmelCase_ = b
if function(_A ) == 0: # one of the a or b is a root for the function
return a
elif function(_A ) == 0:
return b
elif (
function(_A ) * function(_A ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError('''could not find root in given interval.''' )
else:
lowerCAmelCase_ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_A ) == 0:
return mid
elif function(_A ) * function(_A ) < 0:
lowerCAmelCase_ = mid
else:
lowerCAmelCase_ = mid
lowerCAmelCase_ = start + (end - start) / 2.0
return mid
def __UpperCamelCase ( _A ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 703
|
from __future__ import annotations
_A = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = graph
# mapping node to its parent in resulting breadth first tree
lowerCAmelCase_ = {}
lowerCAmelCase_ = source_vertex
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = {self.source_vertex}
lowerCAmelCase_ = None
lowerCAmelCase_ = [self.source_vertex] # first in first out queue
while queue:
lowerCAmelCase_ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase__ )
lowerCAmelCase_ = vertex
queue.append(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
lowerCAmelCase_ = self.parent.get(UpperCamelCase__ )
if target_vertex_parent is None:
lowerCAmelCase_ = (
f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(UpperCamelCase__ )
return self.shortest_path(UpperCamelCase__ ) + f"->{target_vertex}"
if __name__ == "__main__":
_A = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 325
| 0
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ():
snake_case_ = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=SCREAMING_SNAKE_CASE__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=SCREAMING_SNAKE_CASE__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=SCREAMING_SNAKE_CASE__ , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=SCREAMING_SNAKE_CASE__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=SCREAMING_SNAKE_CASE__ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
snake_case_ = parser.parse_args()
return args
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
def fn(SCREAMING_SNAKE_CASE__ ):
return tokenizer(examples['''text'''] )
return fn
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
snake_case_ = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
snake_case_ = tf.train.Features(feature=SCREAMING_SNAKE_CASE__ )
snake_case_ = tf.train.Example(features=SCREAMING_SNAKE_CASE__ )
snake_case_ = example.SerializeToString()
records.append(SCREAMING_SNAKE_CASE__ )
return records
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
snake_case_ = min(len(SCREAMING_SNAKE_CASE__ ) , args.limit )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
snake_case_ = os.path.join(args.output_dir , args.split )
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
os.makedirs(SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
snake_case_ = tokenize_function(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(SCREAMING_SNAKE_CASE__ ):
# Concatenate all texts.
snake_case_ = {k: sum(examples[k] , [] ) for k in examples.keys()}
snake_case_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
snake_case_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
snake_case_ = {
k: [t[i : i + args.max_length] for i in range(0 , SCREAMING_SNAKE_CASE__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
snake_case_ = dataset_tokenized.map(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , batch_size=1000 , num_proc=4 )
snake_case_ = 0
snake_case_ = 0
for shard in range(0 , len(SCREAMING_SNAKE_CASE__ ) , args.shard_size ):
snake_case_ = grouped_dataset[shard : shard + args.shard_size]
snake_case_ = len(dataset_snapshot['''input_ids'''] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
snake_case_ = get_serialized_examples(SCREAMING_SNAKE_CASE__ )
with tf.io.TFRecordWriter(SCREAMING_SNAKE_CASE__ ) as out_file:
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = serialized_examples[i]
out_file.write(SCREAMING_SNAKE_CASE__ )
print('''Wrote file {} containing {} records'''.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
print(F'''Total {args.split} records: {total_records}''' , file=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
main(args)
| 39
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any=1_3 , lowerCAmelCase__ : List[Any]=3_2 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Any=1_6 , lowerCAmelCase__ : Optional[int]=[1, 2, 1] , lowerCAmelCase__ : Tuple=[2, 2, 4] , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Optional[Any]=2.0 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Optional[int]=0.0 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : Tuple=1e-5 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : List[Any]=1_0 , lowerCAmelCase__ : Union[str, Any]=8 , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : int = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : List[Any] = patch_size
_UpperCAmelCase : Optional[int] = num_channels
_UpperCAmelCase : Optional[Any] = embed_dim
_UpperCAmelCase : int = depths
_UpperCAmelCase : Tuple = num_heads
_UpperCAmelCase : int = window_size
_UpperCAmelCase : Optional[int] = mlp_ratio
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = drop_path_rate
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Dict = use_absolute_embeddings
_UpperCAmelCase : Optional[Any] = patch_norm
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = is_training
_UpperCAmelCase : int = scope
_UpperCAmelCase : Dict = use_labels
_UpperCAmelCase : Union[str, Any] = type_sequence_label_size
_UpperCAmelCase : Dict = encoder_stride
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Tuple = SwinvaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Any = model(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = SwinvaForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : int = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Union[str, Any] = SwinvaForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.type_sequence_label_size
_UpperCAmelCase : Dict = SwinvaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase_ : Dict = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : int = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : List[Any] = False
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = SwinvaModelTester(self )
_UpperCAmelCase : Any = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=3_7 )
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = True
for model_class in self.all_model_classes:
_UpperCAmelCase : str = True
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : int = outputs.attentions
_UpperCAmelCase : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = config.window_size**2
_UpperCAmelCase : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Tuple = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
_UpperCAmelCase : int = True
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_UpperCAmelCase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCAmelCase : Dict = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase__ ) )
_UpperCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# Swinv2 has a different seq_length
_UpperCAmelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = reshaped_hidden_states[0].shape
_UpperCAmelCase : List[Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = 3
_UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Tuple = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Dict = SwinvaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Tuple = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase : Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Dict = model(**lowerCAmelCase__ )
# verify the logits
_UpperCAmelCase : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 494
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class _snake_case ( a__ ):
lowerCAmelCase :Dict = '''roberta-prelayernorm'''
def __init__( self , _lowerCamelCase=5_0265 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-1_2 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase)
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = max_position_embeddings
UpperCAmelCase__ : List[str] = type_vocab_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Tuple = layer_norm_eps
UpperCAmelCase__ : Dict = position_embedding_type
UpperCAmelCase__ : List[str] = use_cache
UpperCAmelCase__ : Optional[int] = classifier_dropout
class _snake_case ( a__ ):
@property
def snake_case__ ( self):
if self.task == "multiple-choice":
UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
| 113
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase :Union[str, Any] = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase :Optional[Any] = False
lowerCAmelCase :Dict = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False):
UpperCAmelCase__ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase)
if return_labels:
if model_class in get_values(_lowerCamelCase):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
return inputs_dict
class _snake_case ( a__ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : str = use_input_mask
UpperCAmelCase__ : int = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Union[str, Any] = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : str = scope
UpperCAmelCase__ : Optional[int] = embedding_size
def snake_case__ ( self):
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : str = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = TFMobileBertModel(config=_lowerCamelCase)
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : int = model(_lowerCamelCase)
UpperCAmelCase__ : Dict = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = TFMobileBertForMaskedLM(config=_lowerCamelCase)
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Dict = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=_lowerCamelCase)
UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = TFMobileBertForPreTraining(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Any = model(_lowerCamelCase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Any = self.num_labels
UpperCAmelCase__ : Optional[Any] = TFMobileBertForSequenceClassification(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Tuple = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Tuple = self.num_choices
UpperCAmelCase__ : Dict = TFMobileBertForMultipleChoice(config=_lowerCamelCase)
UpperCAmelCase__ : int = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : str = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Optional[Any] = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Optional[int] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Optional[Any] = TFMobileBertForTokenClassification(config=_lowerCamelCase)
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Any = TFMobileBertForQuestionAnswering(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Any = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def snake_case__ ( self):
UpperCAmelCase__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self)
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCamelCase)
@slow
def snake_case__ ( self):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
UpperCAmelCase__ : Optional[Any] = TFMobileBertModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Any = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""")
UpperCAmelCase__ : str = tf.constant([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
UpperCAmelCase__ : List[str] = [1, 6, 3_0522]
self.assertEqual(output.shape , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1e-4)
| 113
| 1
|
"""simple docstring"""
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
__magic_name__: List[Any] = _modexpt(__UpperCAmelCase , exponent // 2 , __UpperCAmelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__UpperCAmelCase , exponent - 1 , __UpperCAmelCase )) % modulo_value
def a ( __UpperCAmelCase : int = 1_7_7_7 , __UpperCAmelCase : int = 1_8_5_5 , __UpperCAmelCase : int = 8 ) -> int:
__magic_name__: int = base
for _ in range(1 , __UpperCAmelCase ):
__magic_name__: Optional[Any] = _modexpt(__UpperCAmelCase , __UpperCAmelCase , 1_0**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 96
|
"""simple docstring"""
from __future__ import annotations
def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425
| 0
|
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = name
__UpperCamelCase = value
__UpperCamelCase = weight
def __repr__( self )-> List[str]:
'''simple docstring'''
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
return self.value
def A__ ( self )-> str:
'''simple docstring'''
return self.name
def A__ ( self )-> Optional[int]:
'''simple docstring'''
return self.weight
def A__ ( self )-> Optional[int]:
'''simple docstring'''
return self.value / self.weight
def A_ ( snake_case : Union[str, Any] , snake_case : Dict , snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase = []
for i in range(len(__lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def A_ ( snake_case : Tuple , snake_case : List[Any] , snake_case : Any ) -> Any:
'''simple docstring'''
__UpperCamelCase = sorted(__lowerCAmelCase , key=__lowerCAmelCase , reverse=__lowerCAmelCase )
__UpperCamelCase = []
__UpperCamelCase , __UpperCamelCase = 0.0, 0.0
for i in range(len(__lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ) -> List[str]:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase__ : Dict = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self) -> None:
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("DDPM", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
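

# Illustrative sketch (values are arbitrary, not BioGPT defaults): a down-sized
# config for quick experiments.
#
#     config = BioGptConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
#     assert config.hidden_size == 64 and config.model_type == "biogpt"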
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase__ = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
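

# Illustrative sketch (assumes HashMap mirrors dict's mapping protocol, which
# is exactly what the tests above exercise via getitem/setitem/delitem):
#
#     hm = HashMap(initial_block_size=4)
#     hm["answer"] = 42          # setitem
#     assert hm["answer"] == 42  # getitem
#     del hm["answer"]           # delitem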
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    An implementation of the Monte Carlo method used to find pi:
    draw points uniformly in the square [-1, 1] x [-1, 1]; the fraction
    that lands inside the unit circle approaches the area ratio pi / 4.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean function value at uniformly sampled
    points, times the length of the interval.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """
    Checks the estimator on y = x, whose exact integral over
    [min_value, max_value] is (max_value^2 - min_value^2) / 2.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Estimates pi as the area under y = sqrt(4 - x^2) on [0, 2], which is a
    quarter of a circle of radius 2 and therefore has area pi.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
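

# Illustrative demo (not part of the original file): with 10_000 samples the
# estimates typically land within a couple of decimal places, since Monte
# Carlo error shrinks roughly like 1/sqrt(n).
#
#     pi_estimator(10_000)
#     area_under_line_estimator_check(10_000)
#     pi_estimator_using_area_under_curve(10_000)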
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits in a non-negative integer using
    Brian Kernighan's algorithm.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
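

# Worked example: 45 == 0b101101 clears its lowest set bit each iteration,
# 45 -> 44 -> 40 -> 32 -> 0, i.e. four iterations, so the count is 4.
#
#     assert get_set_bits_count(45) == 4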
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
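

# Illustrative shape check (values arbitrary): two videos of eight frames each
# enter as (batch * frames, channels, height, width) and leave with the same shape.
#
#     model = TransformerTemporalModel(num_attention_heads=4, attention_head_dim=8, in_channels=32)
#     x = torch.randn(2 * 8, 32, 16, 16)
#     assert model(x, num_frames=8).sample.shape == x.shape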
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
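

# Illustrative CLI sketch (the script filename and model name are examples;
# fire maps extra --key value flags onto **config_kwargs):
#     python save_randomly_initialized_model.py t5-small /tmp/rand-t5-small --d_model 64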
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache it will temporarily
    move it for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI builds the right
    `gcloud` command; `--debug` prints the command instead of running it.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all',
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
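

# Worked example: with scale_factor=8 the pixel height maps onto the latent
# grid in steps of 8**2 = 64; height=512 gives ceil(512 / 64) * 8 = 64, and a
# non-multiple such as 500 also rounds up to 64.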
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload models to CPU with hooks so each moves to GPU only when its forward runs."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
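

# Worked example of the sparse-layer spacing computed above (illustrative):
# with num_layers=12 and num_sparse_encoder_layers=3, every 12 // 3 = 4th
# encoder layer is a sparse mixture-of-experts layer.
#
#     config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#     assert config.encoder_sparse_step == 4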
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
    main()
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count how many words in words.txt have a triangular word value."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
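

# Worked example: "SKY" has word value 19 + 11 + 25 = 55, and 55 is the 10th
# triangular number (10 * 11 / 2), so "SKY" counts as a triangular word.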
if __name__ == "__main__":
    print(solution())
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
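

# Illustrative sketch: `use_timm_backbone` and `backbone_config` are mutually
# exclusive, so a Transformers ResNet backbone is configured like this
# (the ResNetConfig import from transformers is assumed):
#
#     config = DeformableDetrConfig(use_timm_backbone=False, backbone_config=ResNetConfig(out_features=["stage4"]))
#     assert config.backbone_config.model_type == "resnet"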
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> list:
    """Concatenate the partial per-node results into one list, sorted by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)  # load_json assumed importable alongside lmap
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge with a 0/1 weight, as used by 0-1 BFS."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph; class and method names restored from usage."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: 0-weight edges go to the front of the deque, 1-weight edges to the back.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
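    # Minimal usage sketch (illustrative graph): the two 0-weight edges form a
    # free shortcut from 0 to 2, so 0-1 BFS beats the direct 1-weight edge.
    graph = AdjacencyList(3)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 0)
    graph.add_edge(0, 2, 1)
    print(graph.get_shortest_path(0, 2))  # expected output: 0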
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    """Builds, runs, and measures a quantum Fourier transform circuit on |0...0>."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
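    # Hedged sanity check: the register starts in |000>, and the QFT of that
    # basis state is a uniform superposition, so the 10000 shots should spread
    # roughly evenly over all 2**3 measurement outcomes.
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10000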
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    # Class, argument, and attribute names restored from how they are used below.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List[str]:
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
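# Minimal usage sketch (hedged; `pil_image` is an assumed PIL.Image input):
#
#     processor = MobileViTImageProcessor()
#     batch = processor(images=pil_image, return_tensors="pt")
#     batch["pixel_values"]  # resized, cropped, rescaled, BGR-flipped tensor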
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list, nums2: list) -> float:
    """Returns the median of the merged, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
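# Worked examples:
#   median_of_two_arrays([1, 3], [2])    -> 2    (merged: [1, 2, 3])
#   median_of_two_arrays([1, 2], [3, 4]) -> 2.5  (merged: [1, 2, 3, 4])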
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Determines whether a given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
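# Quick sanity examples: is_prime(29) is True (29 = 6*5 - 1), while
# is_prime(25) is False (25 passes the 6k +/- 1 pre-checks but fails the
# trial division at i = 5).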
def solution(nth: int = 10001) -> int:
    """Returns the nth prime number (Project Euler style)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline (name restored from the SAM-style imports above)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
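# Minimal usage sketch (hedged; the checkpoint name is illustrative):
#
#     from transformers import pipeline
#     generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#     outputs = generator("image.png", points_per_batch=64)
#     outputs["masks"], outputs["scores"]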
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Power iteration for the dominant eigenvalue/eigenvector of a matrix."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
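# Illustrative hand-checked call: for the diagonal matrix [[2, 0], [0, 1]]
# with start vector [1, 1], the iteration converges to the dominant
# eigenvalue 2 and (up to sign and scale) the eigenvector [1, 0].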
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # since eigenvectors are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings (1.0 = identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
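    # Extra sanity checks (values follow directly from the formula above):
    print(jaro_winkler("hello", "hello"))  # identical strings -> 1.0
    print(jaro_winkler("abc", "xyz"))  # no matching characters -> 0.0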
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """Maps utf-8 bytes to printable unicode characters for reversible BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
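# Worked example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}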
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> list:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: list) -> str:
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
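# Minimal usage sketch (hedged; loads vocab/merges from the Hub):
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer(" Hello world")["input_ids"]  # note the space-prefix convention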
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Optional[int] = ["flax"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Optional[int] = ["flax"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> int:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :str = ["flax"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> str:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :str = ["flax"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :str = ["flax"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :List[Any] = ["flax"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Dict = ["flax"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> str:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :int = ["flax"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :List[Any] = ["flax"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :int = ["flax"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> int:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Optional[int] = ["flax"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class _SCREAMING_SNAKE_CASE ( metaclass=A__ ):
UpperCAmelCase_ :Dict = ["flax"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCAmelCase ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Reads the message column by column to produce the cipher text."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Reverses encrypt_message by filling a grid and reading it row-wise."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """REALM model configuration (class name restored from the `realm` model_type)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 67
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted):
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the smallest remaining value to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: bubble the largest remaining value to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
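# Quick sanity check (added for illustration): the result must agree with
# Python's built-in sorted() on a throwaway copy of the input.
def _sanity_check_demo() -> None:
    data = [4, 5, 2, 1, 2]
    assert cocktail_shaker_sort(list(data)) == sorted(data)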
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 450
| 0
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    '''simple docstring'''
    img = Image.new("""RGB""", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
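# Worked example of the coordinate mapping above (added for illustration):
# with the defaults figure_center_x = -0.6 and figure_width = 3.2, the
# left-most pixel column (image_x = 0) maps to -0.6 + (0 - 0.5) * 3.2 = -2.2,
# so the full Mandelbrot set fits inside the frame.
def _mapping_demo() -> None:
    from math import isclose
    image_width, figure_center_x, figure_width = 800, -0.6, 3.2
    left_edge = figure_center_x + (0 / image_width - 0.5) * figure_width
    assert isclose(left_edge, -2.2)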
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 712
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : Tuple = ['audio_values', 'audio_mask']
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="""slaney""", mel_scale="""slaney""", ).T
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, """hann"""), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="""dB""", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
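    # Note on the scaling above: the spectrogram is computed in dB with an
    # 80 dB dynamic range, shifted down by 20 dB, and mapped into roughly
    # [-1, 1] via clip(log_spec / 40, -2, 0) + 1, the value range the
    # downstream model expects.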
def __call__( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , **lowerCamelCase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case_ = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
snake_case_ = is_batched_numpy or (
isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
snake_case_ = np.asarray(lowerCamelCase , dtype=np.floataa )
elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
snake_case_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCamelCase ):
snake_case_ = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
snake_case_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
snake_case_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
snake_case_ = np.array(lowerCamelCase ).astype(np.floataa )
# convert into correct format for padding
snake_case_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
snake_case_ = np.ones([len(lowerCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
snake_case_ = padded_audio_features * self.padding_value
for i in range(len(lowerCamelCase ) ):
snake_case_ = audio_features[i]
snake_case_ = feature
# return as BatchFeature
if return_attention_mask:
snake_case_ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
snake_case_ = {"""audio_values""": padded_audio_features}
snake_case_ = BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
return encoded_inputs
| 161
| 0
|
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest
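# Small worked example (added for illustration): in this 4x4 grid the best
# run of four adjacent numbers is the main diagonal of 2s, whose product is
# 2**4 = 16.
def _largest_product_demo() -> None:
    demo_grid = [
        [2, 1, 1, 1],
        [1, 2, 1, 1],
        [1, 1, 2, 1],
        [1, 1, 1, 2],
    ]
    assert largest_product(demo_grid) == 16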
def solution():
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 96
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = OpenAIGPTTokenizer
UpperCAmelCase__ = OpenAIGPTTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__: str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__magic_name__: Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: List[str] = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
__magic_name__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__snake_case ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
def lowerCamelCase__ ( self : List[Any] , __snake_case : Optional[Any] ) -> Optional[int]:
return "lower newer", "lower newer"
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
__magic_name__: Optional[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
__magic_name__: Union[str, Any] = """lower"""
__magic_name__: Optional[int] = ["""low""", """er</w>"""]
__magic_name__: List[Any] = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
__magic_name__: Optional[int] = tokens + ["""<unk>"""]
__magic_name__: List[str] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Any=1_5 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__magic_name__: str = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
# Simple input
__magic_name__: Dict = """This is a simple input"""
__magic_name__: Any = ["""This is a simple input 1""", """This is a simple input 2"""]
__magic_name__: int = ("""This is a simple input""", """This is a pair""")
__magic_name__: int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding="""max_length""" )
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding="""max_length""" )
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding="""max_length""" , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding="""max_length""" )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding="""max_length""" )
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding="""max_length""" , )
def lowerCamelCase__ ( self : Dict ) -> Any:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
| 96
| 1
|
'''simple docstring'''
def method_a(boundary, steps):
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
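# Accuracy check (added for illustration): for f(x) = x**2 on [0, 1] the
# exact integral is 1/3, and the trapezoidal rule with 10 steps should land
# within its O(h**2) error of that value.
def _trapezoid_demo() -> None:
    approx = method_a([0.0, 1.0], 10.0)
    assert abs(approx - 1 / 3) < 1e-2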
def main():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')
if __name__ == "__main__":
main()
| 716
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCamelCase :
_lowercase : Any = LEDConfig
_lowercase : Any = {}
_lowercase : Optional[Any] = """gelu"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
"""simple docstring"""
_snake_case : Dict = parent
_snake_case : Any = batch_size
_snake_case : List[str] = seq_length
_snake_case : Union[str, Any] = is_training
_snake_case : Tuple = use_labels
_snake_case : int = vocab_size
_snake_case : str = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : Optional[int] = intermediate_size
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[int] = max_position_embeddings
_snake_case : Any = eos_token_id
_snake_case : List[Any] = pad_token_id
_snake_case : Optional[int] = bos_token_id
_snake_case : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
_snake_case : Dict = tf.concat(
[tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , )
_snake_case : Dict = global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
"""simple docstring"""
_snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
_snake_case : Union[str, Any] = inputs_dict['''input_ids''']
_snake_case : List[str] = input_ids[:1, :]
_snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
_snake_case : Dict = 1
# first forward pass
_snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )
_snake_case , _snake_case : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
_snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if attention_mask is None:
_snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
_lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowercase : Dict = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowercase : int = True
_lowercase : List[Any] = False
_lowercase : str = False
_lowercase : Union[str, Any] = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = TFLEDModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )
def UpperCAmelCase_ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
_snake_case : Optional[Any] = 2
_snake_case : Any = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_snake_case : Dict = True
_snake_case : str = self.model_tester.seq_length
_snake_case : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase__ ):
_snake_case : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase__ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Union[str, Any] = False
_snake_case : List[Any] = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
_snake_case : List[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
_snake_case : Union[str, Any] = model_class(lowercase__ )
_snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : str = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
_snake_case : int = True
_snake_case : List[str] = True
_snake_case : Tuple = model_class(lowercase__ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
pass
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )
UpperCAmelCase : Dict = 1E-4
@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
_snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : int = model(**lowercase__ )[0]
_snake_case : Dict = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : List[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
_snake_case : Tuple = model(**lowercase__ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
_snake_case : Dict = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
| 47
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
__lowercase : Union[str, Any] = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "speech_to_text_2"
A_ = ["past_key_values"]
A_ = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 476
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowercase : Dict = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase_ )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , *__a , **__a ):
'''simple docstring'''
super().__init__(*__a , **__a )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __UpperCAmelCase ( self , __a=None ):
'''simple docstring'''
__a : Optional[int] = {}
if top_k is not None:
__a : List[str] = top_k
return {}, {}, postprocess_params
def __call__( self , __a , **__a ):
'''simple docstring'''
return super().__call__(__a , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Any = load_image(__a )
__a : Optional[int] = self.image_processor(images=__a , return_tensors=self.framework )
return model_inputs
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[Any] = self.model(**__a )
return model_outputs
def __UpperCAmelCase ( self , __a , __a=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
__a : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
__a : Optional[int] = model_outputs.logits.softmax(-1 )[0]
__a , __a : List[Any] = probs.topk(__a )
elif self.framework == "tf":
__a : Tuple = stable_softmax(model_outputs.logits , axis=-1 )[0]
__a : Tuple = tf.math.top_k(__a , k=__a )
__a , __a : List[Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__a : List[str] = scores.tolist()
__a : Any = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 476
| 1
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
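# Iterative BFS variant (added as an illustrative alternative, not part of
# the original module): 2-colors each connected component level by level and
# rejects the graph as soon as an edge joins two same-colored vertices.
from collections import deque
def check_bipartite_bfs(graph):
    color = [-1] * len(graph)
    for start in range(len(graph)):
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]
                    queue.append(u)
                elif color[u] == color[v]:
                    return False
    return True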
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 716
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__:Dict = logging.getLogger()
def _lowerCamelCase( ):
__a = argparse.ArgumentParser()
parser.add_argument("-f" )
__a = parser.parse_args()
return args.f
class snake_case__ ( snake_case_ ):
def a__ ( self ):
__a = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
__a = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCamelCase , "argv" , lowerCamelCase ):
__a = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ):
__a = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
__a = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCamelCase )
| 67
| 0
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    '''simple docstring'''
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'''Please use 3 unique rotors (not {unique_rotsel})'''
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f'''First rotor position is not within range of 1..26 ({rotorpos1})'''
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'''Second rotor position is not within range of 1..26 ({rotorpos2})'''
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'''Third rotor position is not within range of 1..26 ({rotorpos3})'''
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    '''simple docstring'''
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f'''Plugboard setting isn\'t type string ({type(pbstring)})'''
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'''Odd number of symbols ({len(pbstring)})'''
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(''' ''', '''''')
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f'''\'{i}\' not in list of symbols'''
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'''Duplicate symbol ({i})'''
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
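# Usage sketch (added for illustration): each plugboard pair is stored in
# both directions, so swaps are symmetric.
def _plugboard_demo() -> None:
    assert _plugboard('ABCD') == {'A': 'B', 'B': 'A', 'C': 'D', 'D': 'C'}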
def enigma(text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3), plugb: str = '', ) -> str:
    '''simple docstring'''
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor1_sel, rotor2_sel, rotor3_sel = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor1_sel[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor2_sel[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor3_sel[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor3_sel.index(symbol) - rotorpos3]
            symbol = abc[rotor2_sel.index(symbol) - rotorpos2]
            symbol = abc[rotor1_sel.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    # any three distinct rotors work here; rotor2/rotor4/rotor8 are one choice
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 97
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__a = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    '''simple docstring'''
    return (abs(source - target) / target) < 0.0_1
@pytest.mark.integration
def test_test_command(dataset_dir):
    '''simple docstring'''
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_dir, '''README.md''')
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''], key), getattr(expected_dataset_infos['''default'''], key)
if key == "num_bytes":
assert is_apercent_close(snake_case__ , snake_case__ )
elif key == "splits":
assert list(snake_case__ ) == list(snake_case__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 97
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
a :Dict = logging.getLogger(__name__)
a :int = {"facebook/bart-base": BartForConditionalGeneration}
a :List[str] = {"facebook/bart-base": BartTokenizer}
def _lowercase ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=__lowerCAmelCase , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=__lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__lowerCAmelCase , )
parser.add_argument(
"""--config_name""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=__lowerCAmelCase , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Where to store the final ONNX file.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
return args
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase="cpu" ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
return huggingface_model, tokenizer
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : str = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = """My friends are cool but they eat too many carbs."""
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
SCREAMING_SNAKE_CASE__ : int = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__lowerCAmelCase , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCAmelCase , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=__lowerCAmelCase , )
logger.info("""Model exported to {}""".format(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Dict = onnxruntime.InferenceSession(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = ort_sess.run(
__lowerCAmelCase , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(__lowerCAmelCase ),
"""max_length""": np.array(__lowerCAmelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parse_args()
SCREAMING_SNAKE_CASE__ : List[Any] = 5
SCREAMING_SNAKE_CASE__ : Dict = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE__ : Tuple = torch.device(args.device )
SCREAMING_SNAKE_CASE__ : List[str] = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(__lowerCAmelCase )
if args.max_length:
SCREAMING_SNAKE_CASE__ : Dict = args.max_length
if args.num_beams:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.num_beams
if args.output_file_path:
SCREAMING_SNAKE_CASE__ : List[Any] = args.output_file_path
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 716
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Any:
# Format the message.
if name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
else:
SCREAMING_SNAKE_CASE__ : str = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
SCREAMING_SNAKE_CASE__ : Dict = fmt.format(__lowerCAmelCase )
# Print and recurse (if needed).
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if msg is not None:
print(__lowerCAmelCase )
for k in val.keys():
recursive_print(__lowerCAmelCase , val[k] , spaces + 2 )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
print(__lowerCAmelCase , """:""" , val.size() )
else:
print(__lowerCAmelCase , """:""" , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
SCREAMING_SNAKE_CASE__ : Tuple = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE__ : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : List[str] = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE__ : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : Dict = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Any = param.view(*__lowerCAmelCase )
return param
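# Shape illustration (added for clarity; a minimal numpy sketch, not part of
# the conversion script): for checkpoint_version >= 2.0 the stored layout is
# [num_heads * num_splits * hidden, :] and the permutation above swaps the
# first two groups, giving [num_splits * num_heads * hidden, :].
def _reorder_demo():
    import numpy as np
    num_heads, num_splits, hidden, cols = 2, 3, 4, 5
    param = np.arange(num_heads * num_splits * hidden * cols).reshape(
        num_heads * num_splits * hidden, cols)
    out = (
        param.reshape(num_heads, num_splits, hidden, cols)
        .transpose(1, 0, 2, 3)  # numpy equivalent of torch's transpose(0, 1)
        .reshape(num_heads * num_splits * hidden, cols))
    # row 0 (head 0 / split 0 / hidden 0) stays the leading row after reordering
    assert (out[0] == param[0]).all()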
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
# The converted output model.
SCREAMING_SNAKE_CASE__ : List[str] = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE__ : List[str] = input_state_dict.get("""args""" , __lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE__ : Dict = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE__ : List[str] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE__ : str = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_state_dict["""checkpoint_version"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = 0.0
# The model.
SCREAMING_SNAKE_CASE__ : Any = input_state_dict["""model"""]
# The language model.
SCREAMING_SNAKE_CASE__ : Any = model["""language_model"""]
# The embeddings.
SCREAMING_SNAKE_CASE__ : str = lm["""embedding"""]
# The word embeddings.
SCREAMING_SNAKE_CASE__ : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE__ : Any = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE__ : Any = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE__ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
SCREAMING_SNAKE_CASE__ : List[Any] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE__ : str = layer_re.match(__lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE__ : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE__ : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE__ : str = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE__ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
SCREAMING_SNAKE_CASE__ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
SCREAMING_SNAKE_CASE__ : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE__ : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = masked_bias
SCREAMING_SNAKE_CASE__ : List[str] = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE__ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE__ : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : Any = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Store. No change of shape.
SCREAMING_SNAKE_CASE__ : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE__ : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : int = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transformer["""final_layernorm.weight"""]
SCREAMING_SNAKE_CASE__ : str = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
SCREAMING_SNAKE_CASE__ : Tuple = word_embeddings
# It should be done!
return output_state_dict
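# CLI entry point: parse arguments, load the Megatron checkpoint (optionally from a
# .zip archive), convert it, and save the GPT-2 config, tokenizer files and state dict.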
def main() -> None:
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
    parser.add_argument(
        """path_to_checkpoint""" , type=str , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
    parser.add_argument(
        """--config_file""" , default="""""" , type=str , help="""An optional config json file describing the pre-trained model.""" , )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
    if args.path_to_checkpoint.endswith(""".zip""" ):
        with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
            with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location="""cpu""" )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
    ds_args = input_state_dict.get("""args""" , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = """gelu_fast"""
            elif ds_args.openai_gelu:
                activation_function = """gelu_new"""
            else:
                activation_function = """gelu"""
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = """gelu_new"""
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , )
    else:
        config = GPTaConfig.from_json_file(args.config_file )
    config.architectures = ["""GPT2LMHeadModel"""]
    # Convert.
    print("""Converting""" )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = """gpt2"""
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
    else:
        tokenizer_model_name = """gpt2"""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("""Saving config""" )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(F'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , """pytorch_model.bin""" )
    print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 12
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class snake_case (PretrainedConfig ):
lowerCAmelCase__ :Union[str, Any] = "openai-gpt"
lowerCAmelCase__ :List[str] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self ,vocab_size=40_478 ,n_positions=512 ,n_embd=768 ,n_layer=12 ,n_head=12 ,afn="gelu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,summary_type="cls_index" ,summary_use_proj=True ,summary_activation=None ,summary_proj_to_labels=True ,summary_first_dropout=0.1 ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 267
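# Minimal usage sketch (illustrative; `snake_case` is this module's config class):
#   config = snake_case()          # all defaults, vocab_size=40_478
#   assert config.n_embd == 768 and config.n_layer == 12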
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class snake_case :
    def __init__( self ,claim_vector ,allocated_resources_table ,maximum_claim_table ,) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self ,**kwargs ) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F'''Process {process_number + 1} is executing.''' )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break
    def __pretty_data( self ) -> None:
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                F'''P{self.__allocated_resources_table.index(item ) + 1}'''
                + " ".join(F'''{it:>8}''' for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                F'''P{self.__maximum_claim_table.index(item ) + 1}'''
                + " ".join(F'''{it:>8}''' for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
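# Minimal usage sketch (illustrative): run the safety check against the test tables
# defined above; any truthy keyword (e.g. describe=True) also prints the tables first.
#   snake_case(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)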
| 267
| 1
|
import random
def partition(a ,left_index ,right_index ) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 ,right_index ):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a ,left ,right ) -> None:
    if left < right:
        pivot = random.randint(left ,right - 1 )
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a ,left ,right )
        quick_sort_random(
            a ,left ,pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a ,pivot_index + 1 ,right )  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n" ).strip()
    arr = [int(item ) for item in user_input.split("," )]
    quick_sort_random(arr ,0 ,len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
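# Worked example (illustrative): quick_sort_random on [3, 1, 4, 1, 5] with
# bounds (0, 5) sorts the list in place to [1, 1, 3, 4, 5].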
| 713
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key ,default=False ) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__a : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__a : Union[str, Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__a : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__a : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__a : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__a : int = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__a : str = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
try:
import faiss # noqa
except ImportError:
lowercase__ : List[str] = unittest.skip("test requires faiss" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
try:
import regex # noqa
except ImportError:
lowercase__ : Any = unittest.skip("test requires regex" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires elasticsearch" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Any = unittest.skip("test requires sqlalchemy" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
if not config.TORCH_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires PyTorch" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
if not config.TF_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires TensorFlow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
if not config.PIL_AVAILABLE:
lowercase__ : Optional[int] = unittest.skip("test requires Pillow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Any:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
def _require_spacy_model(SCREAMING_SNAKE_CASE_ ):
try:
import spacy # noqa F401
spacy.load(SCREAMING_SNAKE_CASE_ )
except ImportError:
return unittest.skip("test requires spacy" )(SCREAMING_SNAKE_CASE_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
return _require_spacy_model
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(SCREAMING_SNAKE_CASE_ )
else:
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test is packaged" )(SCREAMING_SNAKE_CASE_ )
return test_case
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : str = unittest.skip("test requires remote" )(SCREAMING_SNAKE_CASE_ )
return test_case
def for_all_test_methods(*decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls ,name ,fn )
        return cls
    return decorate
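# Raised by the offline simulator below when a request would hang forever because no
# timeout was set; the Enum then selects which offline failure mode to simulate.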
class RequestWouldHangIndefinitelyError(Exception ):
    """simple docstring"""
    pass
class OfflineSimulationMode(Enum ):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
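# Simulate an offline environment in three flavours: every request fails outright,
# every request times out, or datasets is switched to HF_DATASETS_OFFLINE=1 mode.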
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS ,timeout=1E-16 ):
    online_request = requests.Session().request
    def timeout_request(session ,method ,url ,**kwargs ):
        # Change the url to an invalid url so that the connection hangs
        url = "https://10.255.255.1"
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["timeout"] = timeout
        try:
            return online_request(method ,url ,**kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" ,F"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session ,prepared_request ,**kwargs ):
        raise requests.ConnectionError("Offline mode is enabled." ,request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" ,raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" ,timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" ,True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def set_current_working_directory_to_temp_dir(*args ,**kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args ,**kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
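# The next two context managers assert that PyArrow's allocated memory grows
# (respectively, does not grow) across the wrapped block, which is handy for leak checks.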
@contextmanager
def snake_case_ ( ) -> int:
import gc
gc.collect()
lowercase__ : Dict = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def snake_case_ ( ) -> List[str]:
import gc
gc.collect()
lowercase__ : Optional[int] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
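# Two bit generators are considered equal when identical copies of them produce the same draws.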
def is_rng_equal(rnga ,rngb ) -> bool:
    return deepcopy(rnga ).integers(0 ,1_00 ,10 ).tolist() == deepcopy(rngb ).integers(0 ,1_00 ,10 ).tolist()
def xfail_if_500_502_http_error(func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func ,*args ,**kwargs ):
        try:
            return func(*args ,**kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper ,func )
class _RunOutput:
    """simple docstring"""
    def __init__( self ,returncode ,stdout ,stderr ) -> None:
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
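# Asynchronously forward each line from a subprocess pipe to `callback` until EOF.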
async def _read_stream(stream ,callback ) -> None:
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd ,env=None ,stdin=None ,timeout=None ,quiet=False ,echo=False ) -> _RunOutput:
    if echo:
        print("\nRunning: " ," ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line ,sink ,pipe ,label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout ,lambda l: tee(l ,out ,sys.stdout ,label="stdout:" ) ),
            _read_stream(p.stderr ,lambda l: tee(l ,err ,sys.stderr ,label="stderr:" ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
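# Synchronous wrapper around _stream_subprocess: drives it on the event loop and
# raises if the command fails or produces no output at all (useful when the remote
# side does the actual testing).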
def execute_subprocess_async(cmd ,env=None ,stdin=None ,timeout=1_80 ,quiet=False ,echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
    return result
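# Map the pytest-xdist worker name (e.g. "gw3") to an integer id; 0 when not under xdist.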
def pytest_xdist_worker_id() -> int:
    worker = os.environ.get("PYTEST_XDIST_WORKER" ,"gw0" )
    worker = re.sub(r"^gw" ,"" ,worker ,0 ,re.M )
    return int(worker )
def get_torch_dist_unique_port() -> int:
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
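# Minimal usage sketch (illustrative, assumes a `python` executable on PATH):
#   result = execute_subprocess_async(["python", "-c", "print('hello')"])
#   assert result.stdout == ["hello"]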
| 298
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
class A_ ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self :List[str] , lowerCamelCase_ :int = 32 , lowerCamelCase_ :int = 64 , lowerCamelCase_ :int = 20 , lowerCamelCase_ :int = 768 , lowerCamelCase_ :Optional[int]=77 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :float = 0.0 , lowerCamelCase_ :str = "silu" , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = None , lowerCamelCase_ :Optional[str] = "linear" , lowerCamelCase_ :Optional[str] = "prd" , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :Optional[int] = None , ):
"""simple docstring"""
super().__init__()
lowerCamelCase__ : Optional[Any] =num_attention_heads
lowerCamelCase__ : List[Any] =attention_head_dim
lowerCamelCase__ : Dict =num_attention_heads * attention_head_dim
lowerCamelCase__ : Tuple =additional_embeddings
lowerCamelCase__ : Union[str, Any] =time_embed_dim or inner_dim
lowerCamelCase__ : Tuple =embedding_proj_dim or embedding_dim
lowerCamelCase__ : Dict =clip_embed_dim or embedding_dim
lowerCamelCase__ : Optional[Any] =Timesteps(lowerCamelCase_ , lowerCamelCase_ , 0 )
lowerCamelCase__ : Tuple =TimestepEmbedding(lowerCamelCase_ , lowerCamelCase_ , out_dim=lowerCamelCase_ , act_fn=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
if embedding_proj_norm_type is None:
lowerCamelCase__ : Union[str, Any] =None
elif embedding_proj_norm_type == "layer":
lowerCamelCase__ : Union[str, Any] =nn.LayerNorm(lowerCamelCase_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
lowerCamelCase__ : List[str] =nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
if encoder_hid_proj_type is None:
lowerCamelCase__ : Optional[int] =None
elif encoder_hid_proj_type == "linear":
lowerCamelCase__ : List[str] =nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
lowerCamelCase__ : str =nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase_ ) )
if added_emb_type == "prd":
lowerCamelCase__ : str =nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase_ ) )
elif added_emb_type is None:
lowerCamelCase__ : List[Any] =None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
lowerCamelCase__ : Dict =nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dropout=lowerCamelCase_ , activation_fn='gelu' , attention_bias=lowerCamelCase_ , )
for d in range(lowerCamelCase_ )
] )
if norm_in_type == "layer":
lowerCamelCase__ : str =nn.LayerNorm(lowerCamelCase_ )
elif norm_in_type is None:
lowerCamelCase__ : Tuple =None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
lowerCamelCase__ : List[Any] =nn.LayerNorm(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
lowerCamelCase__ : Tuple =torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
lowerCamelCase__ : Optional[Any] =causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowerCamelCase_ , persistent=lowerCamelCase_ )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.zeros(1 , lowerCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : str ={}
def fn_recursive_add_processors(lowerCamelCase_ :str , lowerCamelCase_ :torch.nn.Module , lowerCamelCase_ :Dict[str, AttentionProcessor] ):
if hasattr(lowerCamelCase_ , 'set_processor' ):
lowerCamelCase__ : Optional[int] =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , lowerCamelCase_ , lowerCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return processors
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =len(self.attn_processors.keys() )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(lowerCamelCase_ :str , lowerCamelCase_ :torch.nn.Module , lowerCamelCase_ :Any ):
if hasattr(lowerCamelCase_ , 'set_processor' ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
module.set_processor(lowerCamelCase_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , lowerCamelCase_ , lowerCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
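    # Forward pass: project the timestep, assemble [encoder states | proj embedding |
    # time embedding | hidden states (| prd token)] along the sequence axis, run the
    # transformer blocks under a causal mask, and project back to CLIP embedding size.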
def UpperCAmelCase__ ( self :Any , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[torch.Tensor, float, int] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[torch.BoolTensor] = None , lowerCamelCase_ :bool = True , ):
"""simple docstring"""
lowerCamelCase__ : int =hidden_states.shape[0]
lowerCamelCase__ : Optional[Any] =timestep
if not torch.is_tensor(lowerCamelCase_ ):
lowerCamelCase__ : Any =torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase_ ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : Dict =timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase__ : Any =timesteps * torch.ones(lowerCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
lowerCamelCase__ : int =self.time_proj(lowerCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCamelCase__ : str =timesteps_projected.to(dtype=self.dtype )
lowerCamelCase__ : Union[str, Any] =self.time_embedding(lowerCamelCase_ )
if self.embedding_proj_norm is not None:
lowerCamelCase__ : Union[str, Any] =self.embedding_proj_norm(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =self.embedding_proj(lowerCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCamelCase__ : List[Any] =self.encoder_hidden_states_proj(lowerCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowerCamelCase__ : str =self.proj_in(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =self.positional_embedding.to(hidden_states.dtype )
lowerCamelCase__ : List[Any] =[]
lowerCamelCase__ : str =0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowerCamelCase__ : Dict =proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCamelCase__ : Dict =hidden_states[:, None, :]
lowerCamelCase__ : Optional[Any] =additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCamelCase__ : List[str] =self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase_ , -1 , -1 )
additional_embeds.append(lowerCamelCase_ )
lowerCamelCase__ : int =torch.cat(
lowerCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
lowerCamelCase__ : int =additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCamelCase__ : Optional[Any] =F.pad(
lowerCamelCase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCamelCase__ : Optional[Any] =hidden_states + positional_embeddings
if attention_mask is not None:
lowerCamelCase__ : List[Any] =(1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
lowerCamelCase__ : Any =F.pad(lowerCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
lowerCamelCase__ : Dict =(attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCamelCase__ : int =attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCamelCase__ : str =self.norm_in(lowerCamelCase_ )
for block in self.transformer_blocks:
lowerCamelCase__ : Union[str, Any] =block(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
lowerCamelCase__ : str =self.norm_out(lowerCamelCase_ )
if self.prd_embedding is not None:
lowerCamelCase__ : Tuple =hidden_states[:, -1]
else:
lowerCamelCase__ : List[str] =hidden_states[:, additional_embeddings_len:]
lowerCamelCase__ : List[str] =self.proj_to_clip_embeddings(lowerCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase_ )
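    # Un-normalize prior latents back into CLIP image-embedding space using the
    # clip_mean / clip_std statistics registered at init time.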
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : int =(prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 174
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = StableDiffusionPanoramaPipeline
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCamelCase__ : Optional[int] =DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase__ : Any =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase__ : Union[str, Any] =CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : Optional[int] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str]=0 ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : int ={
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Optional[Any] =self.get_dummy_components()
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : List[str] =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] ='french fries'
lowerCamelCase__ : Optional[Any] =sd_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
lowerCamelCase__ : Any =output.images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : int =self.get_dummy_components()
lowerCamelCase__ : Any =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe(**lowerCamelCase_ , view_batch_size=2 )
lowerCamelCase__ : Dict =output.images
lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[Any] =self.get_dummy_components()
lowerCamelCase__ : Any =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
lowerCamelCase__ : Optional[Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Tuple =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Optional[int] =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[Any] =self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :Tuple=0 ):
"""simple docstring"""
lowerCamelCase__ : Any =torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Dict ={
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='stabilityai/stable-diffusion-2-base'
lowerCamelCase__ : Tuple =DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler' )
lowerCamelCase__ : Dict =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[Any] =self.get_inputs()
lowerCamelCase__ : List[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : int =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : Dict =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : Tuple =StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Dict =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] =self.get_inputs()
lowerCamelCase__ : Optional[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : int =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : str =0
def callback_fn(lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :torch.FloatTensor ) -> None:
lowerCamelCase__ : int =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase__ : Any =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : Optional[Any] =latents[0, -3:, -3:, -1]
lowerCamelCase__ : Union[str, Any] =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase__ : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : int =latents[0, -3:, -3:, -1]
lowerCamelCase__ : List[Any] =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase__ : Optional[int] =False
lowerCamelCase__ : Optional[int] ='stabilityai/stable-diffusion-2-base'
lowerCamelCase__ : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler' )
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any =pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] =self.get_inputs()
pipe(**lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Dict ='stabilityai/stable-diffusion-2-base'
lowerCamelCase__ : Optional[int] =DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler' )
lowerCamelCase__ : str =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Dict =pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : Any =self.get_inputs()
lowerCamelCase__ : Optional[Any] =pipe(**lowerCamelCase_ )
lowerCamelCase__ : int =torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 174
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : str = logging.get_logger(__name__)
A__ : Dict = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowercase ( _UpperCAmelCase ):
'''simple docstring'''
_A = 'instructblip_vision_model'
def __init__( self , __UpperCamelCase=14_08 , __UpperCamelCase=61_44 , __UpperCamelCase=39 , __UpperCamelCase=16 , __UpperCamelCase=2_24 , __UpperCamelCase=14 , __UpperCamelCase="gelu" , __UpperCamelCase=1E-6 , __UpperCamelCase=0.0 , __UpperCamelCase=1E-10 , __UpperCamelCase=True , **__UpperCamelCase , )-> int:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : str = patch_size
UpperCAmelCase__ : Dict = image_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Any = attention_dropout
UpperCAmelCase__ : str = layer_norm_eps
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Optional[int] = qkv_bias
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( _UpperCAmelCase ):
'''simple docstring'''
_A = 'instructblip_qformer'
def __init__( self , __UpperCamelCase=3_05_22 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_12 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=2 , __UpperCamelCase=14_08 , **__UpperCamelCase , )-> Union[str, Any]:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : str = intermediate_size
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Any = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : Union[str, Any] = layer_norm_eps
UpperCAmelCase__ : str = position_embedding_type
UpperCAmelCase__ : List[Any] = cross_attention_frequency
UpperCAmelCase__ : Union[str, Any] = encoder_hidden_size
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , **__UpperCamelCase )-> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ : str = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class _lowercase ( _UpperCAmelCase ):
'''simple docstring'''
_A = 'instructblip'
_A = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=32 , **__UpperCamelCase )-> Union[str, Any]:
super().__init__(**__UpperCamelCase )
if vision_config is None:
UpperCAmelCase__ : List[Any] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
UpperCAmelCase__ : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
UpperCAmelCase__ : List[str] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCAmelCase__ : Tuple = InstructBlipVisionConfig(**__UpperCamelCase )
UpperCAmelCase__ : str = InstructBlipQFormerConfig(**__UpperCamelCase )
UpperCAmelCase__ : Any = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCAmelCase__ : Dict = CONFIG_MAPPING[text_model_type](**__UpperCamelCase )
UpperCAmelCase__ : Any = self.text_config.tie_word_embeddings
UpperCAmelCase__ : Union[str, Any] = self.text_config.is_encoder_decoder
UpperCAmelCase__ : Any = num_query_tokens
UpperCAmelCase__ : List[str] = self.vision_config.hidden_size
UpperCAmelCase__ : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase__ : List[Any] = 1.0
UpperCAmelCase__ : Optional[int] = 0.02
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase , )-> Dict:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : str = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Optional[Any] = self.vision_config.to_dict()
UpperCAmelCase__ : str = self.qformer_config.to_dict()
UpperCAmelCase__ : str = self.text_config.to_dict()
UpperCAmelCase__ : Tuple = self.__class__.model_type
return output
| 719
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self )-> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Tuple = ort.SessionOptions()
UpperCAmelCase__ : List[str] = False
return options
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase__ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase__ : int = "A red cat sitting on a park bench"
UpperCAmelCase__ : Tuple = np.random.RandomState(0 )
UpperCAmelCase__ : Any = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCamelCase , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images
UpperCAmelCase__ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 660
| 0
|
'''simple docstring'''
def perfect(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
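# Quick reference values: the first perfect numbers are 6, 28 and 496,
# so perfect(6) and perfect(28) are True while perfect(7) is False.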
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 501
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    """simple docstring"""
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""")
        in_proj_bias_cross_attn = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)
# load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(F"""Converting model {model_name}...""")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
# rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 501
| 1
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 40
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F"""->{target_vertex}"""

if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
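    # For the adjacency list above this should print (derived by hand; worth
    # re-checking if the graph changes):
    #   G->C->A->B->D
    #   G
    # and then raise ValueError, since "Foo" is not reachable from "G".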
| 40
| 1
|
"""simple docstring"""
def one_pence():
    return 1

def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)

def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)

def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)

def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)

def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)

def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)

def solution(x=200):
    return two_pound(x)

if __name__ == "__main__":
    print(solution(int(input().strip())))
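# Reference result: solution(200) counts the ways to make 200 pence from the
# standard UK coin denominations; the known Project Euler 31 answer is 73682.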
| 528
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase =logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """simple docstring"""
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        """simple docstring"""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    F'''Received an invalid text input, got - {type(prompt)} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'''input_ids''': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(F'''Model type {model_type} does not support conditional text generation''')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        """simple docstring"""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], list)
            and all(x is None for x in model_inputs['''input_ids'''])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        """simple docstring"""
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
| 285
| 0
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase_ ( __magic_name__ ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 489
|
'''simple docstring'''
import math
def solution(n: int = 100):
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
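# Worked example: for n = 10 the square of the sum is 55**2 = 3025 and the sum
# of the squares is 385, so solution(10) returns 2640.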
if __name__ == "__main__":
print(f'''{solution() = }''')
| 489
| 1
|
'''simple docstring'''
def is_isogram(string: str):
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
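# Examples: is_isogram("Uncopyrightable") is True (no repeated letters), while
# is_isogram("allowance") is False because of the repeated letters.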
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 18
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""", description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""", description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""", """Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""", type=str, default=None, help="""Path to the config file to use for accelerate.""", )
    config_args.add_argument(
        """--tpu_name""", default=None, help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""", )
    config_args.add_argument(
        """--tpu_zone""", default=None, help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""", )
    pod_args = parser.add_argument_group("""TPU Arguments""", """Arguments for options ran inside the TPU.""" )
    pod_args.add_argument(
        """--use_alpha""", action="""store_true""", help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""", )
    pod_args.add_argument(
        """--command_file""", default=None, help="""The path to the file containing the commands to run on the pod on startup.""", )
pod_args.add_argument(
"""--command""", action="""append""", nargs="""+""", help="""A command to run on the pod. Can be passed multiple times.""", )
pod_args.add_argument(
"""--install_accelerate""", action="""store_true""", help="""Whether to install accelerate on the pod. Defaults to False.""", )
pod_args.add_argument(
"""--accelerate_version""", default="""latest""", help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""", )
pod_args.add_argument(
"""--debug""", action="""store_true""", help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = F'''accelerate=={args.accelerate_version}'''
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file, """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [F'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = """; """.join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(F'''Running {" ".join(cmd)}''' )
        return
    subprocess.run(cmd)
print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 288
| 0
|
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
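# Small example: in the open 2x2 grid [[0, 0], [0, 0]] there are exactly two
# simple paths from the top-left to the bottom-right cell, so
# depth_first_search([[0, 0], [0, 0]], 0, 0, set()) returns 2.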
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''')
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''image_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
def _lowerCamelCase ( self ):
__a : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Dict = self.get_dummy_components()
__a : Any = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs(_UpperCAmelCase )
__a : str = sd_pipe(**_UpperCAmelCase ).images
__a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Optional[int] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Optional[Any] = self.get_dummy_components()
__a : Dict = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : List[Any] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
__a : Union[str, Any] = '''french fries'''
__a : str = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
__a : Dict = output.images
__a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Tuple = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Dict = self.get_dummy_components()
__a : str = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : Optional[int] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = self.get_dummy_inputs(_UpperCAmelCase )
__a : List[str] = [inputs['''prompt''']] * 2
__a : Optional[Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
__a : Optional[Any] = torch.from_numpy(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
__a : Tuple = image / 2 + 0.5
__a : str = image.permute(0 , 3 , 1 , 2 )
__a : List[str] = image.repeat(2 , 1 , 1 , 1 )
__a : int = sd_pipe(**_UpperCAmelCase ).images
__a : Optional[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__a : List[str] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.get_dummy_components()
__a : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
__a : str = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : List[str] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Dict = self.get_dummy_inputs(_UpperCAmelCase )
__a : Any = sd_pipe(**_UpperCAmelCase ).images
__a : Dict = image[0, -3:, -3:, -1]
__a : Optional[int] = [round(_UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(_UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__a : int = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowerCamelCase ( self ):
__a : Any = self.get_dummy_components()
__a : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
__a : Any = VaeImageProcessor(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase )
__a : Dict = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : str = pipe(**self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type='''pt''' ) )[0]
__a : List[Any] = components['''vae''']
__a : List[Any] = self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__a : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
__a : str = pipe(**_UpperCAmelCase )[0]
__a : Union[str, Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_UpperCAmelCase , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''')
        inputs = {
            '''prompt''': '''turn him into a cyborg''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''image_guidance_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
def _lowerCamelCase ( self ):
__a : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : str = self.get_inputs()
__a : Optional[Any] = pipe(**_UpperCAmelCase ).images
__a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : Tuple = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_UpperCAmelCase )
__a : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : List[str] = self.get_inputs()
__a : Optional[int] = pipe(**_UpperCAmelCase ).images
__a : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : Tuple = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_UpperCAmelCase )
__a : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : List[str] = self.get_inputs()
__a : str = pipe(**_UpperCAmelCase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : Any = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
__a : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__a : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__a : int = latents[0, -3:, -3:, -1]
__a : int = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__a : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__a : List[str] = latents[0, -3:, -3:, -1]
__a : Tuple = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__a : Union[str, Any] = False
__a : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
__a : Optional[int] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : str = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowerCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
__a : Optional[int] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : List[Any] = self.get_inputs()
__a : Tuple = pipe(**_UpperCAmelCase )
__a : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def _lowerCamelCase ( self ):
__a : List[Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__a : str = inputs['''image'''].resize((504, 504) )
__a : Tuple = '''timbrooks/instruct-pix2pix'''
__a : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : List[Any] = pipe(**_UpperCAmelCase )
__a : int = output.images[0]
__a : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__a : Union[str, Any] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 101
| 1
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
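# Example: 145 is a fixed point of this map, since 1! + 4! + 5! = 1 + 24 + 120 = 145.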
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """simple docstring"""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
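# Reference result: with the defaults (chains of length 60, starting numbers below
# one million) the known Project Euler 74 answer is solution() == 402.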
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 56
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """simple docstring"""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1)[0]
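# Reference result: the smallest odd composite that cannot be written as a prime
# plus twice a square (Project Euler 46) is 5777, so solution() returns 5777.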
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> List[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 352
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007

def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))

def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """simple docstring"""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)

if __name__ == "__main__":
    def benchmark() -> None:
        """simple docstring"""
        from timeit import timeit
        print('''Without Numpy''')
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(), ))
        print('''With Numpy''')
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''', number=10_000, globals=globals(), ))
    benchmark()
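# Quick consistency check: both implementations agree on simple inputs, e.g.
# euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0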
| 352
| 1
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    def __post_init__(self):
        '''simple docstring'''
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None, ):
        '''simple docstring'''
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""", FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i):
        '''simple docstring'''
        return self.features[i]
    def get_labels(self):
        '''simple docstring'''
        return self.label_list
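# Illustrative sketch (added; not part of the original file): `load_or_build_features`
# is a hypothetical, minimal distillation of the FileLock-guarded caching pattern in
# GlueDataset.__init__ above -- one process builds and saves the features while
# concurrent processes block on the lock and then load the cached file.
def load_or_build_features(cache_file, build_fn, overwrite=False):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file) and not overwrite:
            return torch.load(cache_file)
        features = build_fn()
        torch.save(features, cache_file)
        return features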
| 336
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowercase : Optional[Any] = """http://www.mocksite.com/file1.txt"""
lowercase : str = """\"text\": [\"foo\", \"foo\"]"""
lowercase : Tuple = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}
    def iter_content(self, **kwargs):
        '''simple docstring'''
        return [bytes(CONTENT, """utf-8""")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
import requests
monkeypatch.setattr(SCREAMING_SNAKE_CASE__ , """request""" , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = URL
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = url
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = [url]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = {"""train""": url}
lowercase : Union[str, Any] = """dummy"""
lowercase : List[Any] = """downloads"""
lowercase : Optional[Any] = tmp_path
lowercase : Dict = DownloadConfig(
cache_dir=os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = dl_manager.download(SCREAMING_SNAKE_CASE__ )
lowercase : str = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [downloaded_paths]
lowercase : Any = [urls]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in downloaded_paths.keys()
lowercase : Tuple = downloaded_paths.values()
lowercase : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowercase : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowercase : Dict = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = filename
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = [filename]
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = {"""train""": filename}
lowercase : int = """dummy"""
lowercase : List[Any] = xz_file.parent
lowercase : Union[str, Any] = """extracted"""
lowercase : List[str] = DownloadConfig(
cache_dir=SCREAMING_SNAKE_CASE__ , use_etag=SCREAMING_SNAKE_CASE__ , )
lowercase : List[Any] = DownloadManager(dataset_name=SCREAMING_SNAKE_CASE__ , download_config=SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = dl_manager.extract(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : int = [extracted_paths]
lowercase : Optional[Any] = [paths]
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert "train" in extracted_paths.keys()
lowercase : Optional[Any] = extracted_paths.values()
lowercase : Optional[int] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase : List[Any] = Path(SCREAMING_SNAKE_CASE__ )
lowercase : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(SCREAMING_SNAKE_CASE__ , etag=SCREAMING_SNAKE_CASE__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase : Optional[int] = extracted_path.read_text()
lowercase : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(""".jsonl""")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("""utf-8"""))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : str = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Optional[Any] = request.getfixturevalue(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(SCREAMING_SNAKE_CASE__ ) , start=1 ):
_test_jsonl(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 336
| 1
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
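    # Illustrative check (added; not in the original file): @lru_cache memoizes
    # every intermediate result, so repeated calls reuse earlier recursion.
    assert factorial(10) == 3_628_800
    print(factorial.cache_info())  # cache hits grow on repeated calls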
| 370
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_maskformer_config(model_name):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"""nielsr/{model_name}""")
        image_processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
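# Illustrative sketch (added; not part of the original script): the conversion is
# driven by (old_name, new_name) pairs plus in-place q/k/v splitting. The hypothetical
# helper below replays both moves on a dummy state dict.
def _demo_rename_and_split():
    import numpy as np
    sd = {"backbone.patch_embed.norm.weight": np.ones(4), "attn.qkv.weight": np.arange(18).reshape(6, 3)}
    rename_key(sd, "backbone.patch_embed.norm.weight", "encoder.embeddings.norm.weight")
    qkv = sd.pop("attn.qkv.weight")
    dim = qkv.shape[0] // 3  # the fused matrix stacks query, key, value along dim 0
    sd["attn.query.weight"], sd["attn.key.weight"], sd["attn.value.weight"] = (
        qkv[:dim], qkv[dim : dim * 2], qkv[-dim:],
    )
    return sd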
| 370
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ['''pixel_values''']
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
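# Illustrative usage (added; not part of the original file). The class name above is
# an assumption recovered from the OPENAI_CLIP constants; the call below just walks
# the resize -> center-crop -> rescale -> normalize pipeline on a dummy image.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    dummy = Image.fromarray((np.random.rand(256, 320, 3) * 255).astype("uint8"))
    batch = CLIPImageProcessor().preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)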
| 4
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data):
        """simple docstring"""
        self.data = data
        self.next = None
def __repr__( self ):
"""simple docstring"""
return F'Node({self.data})'
class LinkedList:
def __init__( self ):
"""simple docstring"""
        self.head = None
def __iter__( self ):
"""simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
    def __getitem__(self, index):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
    def __setitem__(self, index, data):
        """simple docstring"""
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data):
        """simple docstring"""
        self.insert_nth(len(self), data)
    def insert_head(self, data):
        """simple docstring"""
        self.insert_nth(0, data)
    def insert_nth(self, index, data):
        """simple docstring"""
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        """simple docstring"""
        print(self)
    def delete_head(self):
        """simple docstring"""
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        """simple docstring"""
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index=0):
        """simple docstring"""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        """simple docstring"""
        return self.head is None
    def reverse(self):
        """simple docstring"""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(7734_5112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.5_5555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')
if __name__ == "__main__":
main()
| 4
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """simple docstring"""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep='\t', header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'''F1: {f1:.2f}''')
    logger.info(f'''EM: {em:.2f}''')
def get_precision_at_k(args, preds_path, gold_data_path):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''')
def evaluate_batch_retrieval(args, rag_model, questions):
    """simple docstring"""
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors='pt', padding=True, truncation=True, )['input_ids'].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors='pt', )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))
        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type', choices=['rag_sequence', 'rag_token', 'bart'], type=str, help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ), )
    parser.add_argument(
        '--index_name', default=None, choices=['exact', 'compressed', 'legacy'], type=str, help='RAG model retriever type', )
    parser.add_argument(
        '--index_path', default=None, type=str, help='Path to the retrieval index', )
    parser.add_argument('--n_docs', default=5, type=int, help='Number of retrieved docs')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained checkpoints or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--eval_mode', choices=['e2e', 'retrieval'], default='e2e', type=str, help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ), )
    parser.add_argument('--k', default=1, type=int, help='k for the precision@k calculation')
    parser.add_argument(
        '--evaluation_set', default=None, type=str, required=True, help='Path to a file containing evaluation samples', )
    parser.add_argument(
        '--gold_data_path', default=None, type=str, required=True, help='Path to a tab-separated file with gold samples', )
    parser.add_argument(
        '--gold_data_mode', default='qa', type=str, choices=['qa', 'ans'], help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ), )
    parser.add_argument(
        '--predictions_path', type=str, default='predictions.txt', help='Name of the predictions file, to be stored in the checkpoints directory', )
    parser.add_argument(
        '--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number', )
    parser.add_argument(
        '--eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.', )
    parser.add_argument(
        '--recalculate', help='Recalculate predictions even if the prediction file exists', action='store_true', )
    parser.add_argument(
        '--num_beams', default=4, type=int, help='Number of beams to be used when generating answers', )
    parser.add_argument('--min_length', default=1, type=int, help='Min length of the generated answers')
    parser.add_argument('--max_length', default=50, type=int, help='Max length of the generated answers')
    parser.add_argument(
        '--print_predictions', action='store_true', help='If True, prints predictions while evaluating.', )
    parser.add_argument(
        '--print_docs', action='store_true', help='If True, prints docs retrieved while generating.', )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return args
def main(args):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s', checkpoints)
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))
        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
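# Illustrative sketch (added; not part of the original script):
# `metric_max_over_ground_truths` scores a prediction against every reference answer
# and keeps the best score, as the hypothetical stand-alone check below shows.
def _demo_metric_max():
    exact = lambda pred, gt: float(pred == gt)
    assert metric_max_over_ground_truths(exact, "paris", ["Paris", "paris"]) == 1.0
    assert metric_max_over_ground_truths(exact, "rome", ["Paris", "paris"]) == 0.0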
| 372
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestDocSamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files_docstrings(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_files_docstrings(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)
    def test_configuration_files_docstrings(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)
    def test_remaining_files_docstrings(self):
        directory = Path('src/transformers')
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifier)
    def test_documentation_sources(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 372
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ :List[str] = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_deberta_fast'''] = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deberta'''] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deberta'''] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
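# Illustrative sketch (added; not part of the original file): _LazyModule defers the
# heavy torch/TF imports above until first attribute access. `_lazy_import` below is
# a hypothetical, minimal version of that idea.
def _lazy_import(module_name, attr):
    # Import `module_name` only when `attr` is actually needed.
    import importlib
    return getattr(importlib.import_module(module_name), attr)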
| 618
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 618
| 1
|
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_lowerCAmelCase : Dict = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = R'''.*sequential.(\d+).*'''
    text_projection_pattern = R'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''')
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
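# A minimal invocation sketch; the file paths below are hypothetical placeholders,
# not paths shipped with this script:
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf \
#       --enable_fusion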
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = 'encoder-decoder'
_lowerCAmelCase = True
def __init__( self , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case__ : List[str] = kwargs.pop('''encoder''' )
snake_case__ : Any = encoder_config.pop('''model_type''' )
snake_case__ : List[str] = kwargs.pop('''decoder''' )
snake_case__ : str = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case__ : Tuple = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : Optional[Any] = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
snake_case__ : str = True
@classmethod
def lowercase__ ( cls , lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case__ : Optional[int] = True
snake_case__ : str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = copy.deepcopy(self.__dict__ )
snake_case__ : List[Any] = self.encoder.to_dict()
snake_case__ : str = self.decoder.to_dict()
snake_case__ : Any = self.__class__.model_type
return output
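# A minimal usage sketch (the checkpoint names are illustrative assumptions,
# not mandated by this config class):
#   from transformers import AutoConfig
#   encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
#   decoder_config = AutoConfig.from_pretrained("gpt2")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention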
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
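# With this guard pattern, a missing optional backend is replaced by dummy objects
# that fail only when actually used. A sketch, assuming torchsde is not installed:
#   from diffusers.schedulers import DPMSolverSDEScheduler  # the import itself succeeds
#   DPMSolverSDEScheduler()  # raises, asking you to install the missing backend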
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
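# For example: downscale_height_and_width(512, 512, scale_factor=8) returns (64, 64),
# since 512 // 8**2 == 8 and 8 * 8 == 64. A non-multiple such as 500 rounds up to the
# next latent cell: 500 // 64 == 7, plus 1 for the remainder, giving 8 * 8 == 64.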
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self :int , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
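                # classifier-free guidance: eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)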
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
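# At import time only the lightweight structure above is registered; the lazy module
# defers the framework-specific imports until an attribute is first accessed, e.g.
# (a sketch): importing this package is cheap, and touching
# `VisionTextDualEncoderModel` on it is what triggers the real torch-backed import.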
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self : Optional[int] ) -> Any:
_UpperCamelCase =FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_UpperCamelCase =AutoTokenizer.from_pretrained('''google/mt5-small''' )
_UpperCamelCase =tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_UpperCamelCase =tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_UpperCamelCase =shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
_UpperCamelCase =model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
_UpperCamelCase =optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean()
_UpperCamelCase =-(labels.shape[-1] * loss.item())
_UpperCamelCase =-84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
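        # The value checked above is sequence length times the (negative) mean per-token
        # cross-entropy, i.e. the total negative log-likelihood of the reference tokens;
        # the hard-coded EXPECTED_SCORE appears to be a reference value from the original
        # (Mesh-TensorFlow) T5 implementation, hence the name `mtf_score`.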
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''', [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
], )
def snake_case_ ( A_ : Dict, A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : int = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''', '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
_lowerCamelCase : str = DatasetInfosDict.from_directory(A_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''', [
DatasetInfo(),
DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ),
], )
def snake_case_ ( A_ : str, A_ : DatasetInfo ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = str(A_ )
dataset_info.write_to_directory(A_ )
_lowerCamelCase : str = DatasetInfo.from_directory(A_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A_, '''dataset_info.json''' ) )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = DatasetInfo(
description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
_lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict()
assert sorted(A_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_lowerCamelCase : str = yaml.safe_dump(A_ )
_lowerCamelCase : Tuple = yaml.safe_load(A_ )
assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : int = DatasetInfo()
_lowerCamelCase : Dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''', [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
], )
def snake_case_ ( A_ : Optional[Any], A_ : DatasetInfosDict ):
'''simple docstring'''
_lowerCamelCase : List[str] = str(A_ )
dataset_infos_dict.write_to_directory(A_ )
_lowerCamelCase : List[Any] = DatasetInfosDict.from_directory(A_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCamelCase : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCamelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A_, '''README.md''' ) )
import functools
def min_edit_distance(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
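# Quick sanity checks of the recurrence (standard Levenshtein examples):
#   min_edit_distance("kitten", "sitting") -> 3  (substitute k->s, substitute e->i, insert g)
#   min_edit_distance("", "abc") -> 3  (three insertions)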
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
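        # The hard-coded ids above assume greedy decoding (do_sample=False), which is
        # deterministic and therefore safe to compare token-by-token across runs.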
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __snake_case ( __magic_name__ , __magic_name__="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) as f:
lowercase = json.load(__magic_name__ )
lowercase = {}
lowercase = []
lowercase = []
for key, info in class_info.items():
lowercase = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(__magic_name__ ) )
lowercase = thing_ids
lowercase = class_names
return metadata
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :str=7 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Tuple=30 , lowerCAmelCase__ :int=400 , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Union[str, Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Dict=255 , lowerCAmelCase__ :List[Any]="shi-labs/oneformer_demo" , lowerCAmelCase__ :List[str]="ade20k_panoptic.json" , lowerCAmelCase__ :List[str]=10 , ) ->Dict:
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = class_info_file
lowercase = prepare_metadata(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = num_text
lowercase = repo_path
# for the post_process_functions
lowercase = 2
lowercase = 10
lowercase = 10
lowercase = 3
lowercase = 4
lowercase = num_labels
lowercase = do_reduce_labels
lowercase = ignore_index
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def SCREAMING_SNAKE_CASE( self :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str]=False ) ->List[Any]:
if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def SCREAMING_SNAKE_CASE( self :Tuple ) ->List[Any]:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCamelCase_ ( __a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase : Optional[int] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCamelCase : Tuple = image_processing_class
def SCREAMING_SNAKE_CASE( self :List[str] ) ->Optional[int]:
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
def SCREAMING_SNAKE_CASE( self :str ) ->List[str]:
return self.image_processing_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE( self :Dict ) ->Optional[Any]:
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "ignore_index" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "class_info_file" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "num_text" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "repo_path" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "metadata" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_reduce_labels" ) )
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE( self :str ) ->Optional[Any]:
# Initialize image_processor
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->int:
# Initialize image_processor
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE( self :List[Any] ) ->int:
# Initialize image_processor
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
lowercase = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
lowercase = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Union[str, Any]="np" ) ->Optional[Any]:
lowercase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowercase = self.image_processing_tester.num_labels
lowercase = None
lowercase = None
lowercase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
if with_segmentation_maps:
lowercase = num_labels
if is_instance_map:
lowercase = list(range(lowerCAmelCase__ ) ) * 2
lowercase = dict(enumerate(lowerCAmelCase__ ) )
lowercase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowercase = [Image.fromarray(lowerCAmelCase__ ) for annotation in annotations]
lowercase = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , lowerCAmelCase__ , return_tensors="pt" , instance_id_to_semantic_id=lowerCAmelCase__ , pad_and_return_pixel_mask=lowerCAmelCase__ , )
return inputs
def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->List[str]:
pass
def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Optional[Any]:
def common(lowerCAmelCase__ :str=False , lowerCAmelCase__ :Dict=None ):
lowercase = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCAmelCase__ , is_instance_map=lowerCAmelCase__ , segmentation_type=lowerCAmelCase__ )
lowercase = inputs["mask_labels"]
lowercase = inputs["class_labels"]
lowercase = inputs["pixel_values"]
lowercase = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCAmelCase__ )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
def SCREAMING_SNAKE_CASE( self :Dict ) ->Union[str, Any]:
lowercase = np.zeros((20, 50) )
lowercase = 1
lowercase = 1
lowercase = 1
lowercase = binary_mask_to_rle(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->List[Any]:
        feature_extractor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowercase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(lowerCAmelCase__ , target_sizes=lowerCAmelCase__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Union[str, Any]:
lowercase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase = image_processor.post_process_instance_segmentation(lowerCAmelCase__ , threshold=0 )
self.assertTrue(len(lowerCAmelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCAmelCase__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def SCREAMING_SNAKE_CASE( self :str ) ->Optional[Any]:
lowercase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase = image_processor.post_process_panoptic_segmentation(lowerCAmelCase__ , threshold=0 )
self.assertTrue(len(lowerCAmelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCAmelCase__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase_ ( __a ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Optional[int]:
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "tf_padding" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , "depth_multiplier" ) )
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any]=13 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :Optional[int]=32 , lowerCAmelCase__ :Any=0.25 , lowerCAmelCase__ :Dict=8 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :List[str]=6 , lowerCAmelCase__ :List[Any]=32 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Dict="relu6" , lowerCAmelCase__ :Tuple=1280 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :List[str]=0.02 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=10 , lowerCAmelCase__ :int=None , ) ->List[str]:
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = depth_multiplier
lowercase = depth_divisible_by
lowercase = min_depth
lowercase = expand_ratio
lowercase = tf_padding
lowercase = output_stride
lowercase = first_layer_is_expansion
lowercase = finegrained_output
lowercase = hidden_act
lowercase = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase = classifier_dropout_prob
lowercase = use_labels
lowercase = is_training
lowercase = num_labels
lowercase = initializer_range
lowercase = scope
def SCREAMING_SNAKE_CASE( self :str ) ->Dict:
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->List[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) ->Any:
lowercase = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] ) ->Union[str, Any]:
lowercase = self.num_labels
lowercase = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) ->Union[str, Any]:
lowercase = self.num_labels
lowercase = MobileNetVaForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE( self :Any ) ->int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __a , __a , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase : Optional[int] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase : List[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : int = False
UpperCamelCase : str = False
UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Dict:
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE( self :Dict ) ->Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Optional[int]:
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE( self :Any ) ->str:
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def SCREAMING_SNAKE_CASE( self :List[Any] ) ->str:
pass
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->Dict:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowerCAmelCase__ )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE( self :int ) ->List[str]:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE( self :int ) ->Optional[int]:
def check_hidden_states_output(lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ):
lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase = outputs.hidden_states
lowercase = 16
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE( self :Any ) ->Tuple:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE( self :int ) ->str:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__ )
@slow
def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->str:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __snake_case ( ):
'''simple docstring'''
lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE( self :str ) ->Any:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE( self :Tuple ) ->Union[str, Any]:
lowercase = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(lowerCAmelCase__ )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
# verify the logits
lowercase = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
lowercase = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE( self :Tuple ) ->List[Any]:
lowercase = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
lowercase = model.to(lowerCAmelCase__ )
lowercase = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
lowercase = prepare_img()
lowercase = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
lowercase = outputs.logits
# verify the logits
lowercase = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowerCAmelCase__ )
lowercase = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=lowerCAmelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
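# Minimal sketch of what the lazy structure above buys (assumes the standard
# `transformers` package layout; the config kwargs are illustrative):
#
#     import importlib
#
#     module = importlib.import_module("transformers.models.blenderbot_small")
#     config_cls = module.BlenderbotSmallConfig  # first attribute access triggers the real import
#     config = config_cls(encoder_layers=2, decoder_layers=2)
#
# Until that attribute access, none of the torch/tf/flax modeling files are
# imported, which keeps `import transformers` cheap when backends are missing.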
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
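# Usage sketch for the builder above (file names are hypothetical; the "json"
# packaged module resolves to this `Json` builder via `datasets.load_dataset`):
def _demo_load_json():
    from datasets import load_dataset

    # newline-delimited JSON goes through the chunked pyarrow branch of _generate_tables
    ds = load_dataset("json", data_files="train.jsonl", split="train")
    # a single JSON document with records nested under one key takes the `field` branch
    nested = load_dataset("json", data_files="dump.json", field="data", split="train")
    return ds, nested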
|
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        """calc_profit takes (profit, weight, max_weight) and returns the
        maximum profit the greedy strategy can reach."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # sample inputs chosen to trigger the validation error
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20], [2, 4], -15
        )

    def test_negative_weight_value(self):
        self.assertRaisesRegex(
            ValueError, "Weight can not be negative.", kp.calc_profit, [10, 20], [2, -4], 30
        )

    def test_negative_profit_value(self):
        self.assertRaisesRegex(
            ValueError, "Profit can not be negative.", kp.calc_profit, [-10, 20], [2, 4], 30
        )

    def test_null_max_weight(self):
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20], [2, 4], 0
        )

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.", kp.calc_profit, [10, 20, 30], [2, 4], 100
        )
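# A sketch of the greedy routine these tests exercise (assumed implementation;
# the real `knapsack.greedy_knapsack.calc_profit` may differ in detail, e.g.
# validation order):
def _calc_profit_sketch(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Fractional greedy: take items by decreasing profit/weight ratio and a
    # fraction of the first item that no longer fits whole.
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    used, gain = 0, 0.0
    for i in order:
        if used + weight[i] <= max_weight:
            used += weight[i]
            gain += profit[i]
        else:
            gain += (max_weight - used) / weight[i] * profit[i]
            break
    return gain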
if __name__ == "__main__":
unittest.main()
|
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(".")
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
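# Construction sketch (tiny, made-up dimensions for illustration; the released
# checkpoint config is `uw-madison/mra-base-512-4`):
def _demo_mra_config():
    config = MraConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    return config.approx_mode, config.block_per_row  # ("full", 4) by default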
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : int = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase : Any = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase : Optional[int] = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
UpperCAmelCase : List[Any] = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
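# Usage sketch (downloads "bert-base-uncased" on first call; the sentences are
# arbitrary examples):
def _demo_bert_tokenizer():
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoded = tokenizer("Hello world", "Second segment")
    # token_type_ids are built by create_token_type_ids_from_sequences above:
    # 0s for the first segment (plus [CLS]/[SEP]), 1s for the second.
    return encoded["input_ids"], encoded["token_type_ids"]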
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = "random"
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )
        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()
        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
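# End-to-end sketch of the pattern the tests above cover (CPU-safe; reuses the
# toy components defined at the top of this file):
def _demo_prepare():
    accelerator = Accelerator(cpu=True)
    model, optimizer, scheduler, train_dl, valid_dl = create_components()
    model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
        model, optimizer, scheduler, train_dl, valid_dl
    )
    return get_signature(model)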
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
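# Backbone-style usage sketch (stage names come from `stage_names`; the chosen
# stages are illustrative):
def _demo_bit_backbone_config():
    config = BitConfig(out_features=["stage2", "stage4"])
    # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] with the default depths
    return config.stage_names, config.out_features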
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Calculate the expected number of distinct colours among the picked balls."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
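# The closed form is linearity of expectation:
#   E[#distinct colours] = NUM_COLOURS * (1 - C(60, 20) / C(70, 20))
# A small Monte Carlo cross-check (illustrative only; slow and approximate):
def simulate(num_picked: int = 20, trials: int = 10_000) -> float:
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total = 0
    for _ in range(trials):
        total += len(set(random.sample(balls, num_picked)))
    return total / trials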
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
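# Note on the integration inputs above: LayoutLMv3 expects one [x0, y0, x1, y1]
# box per text token, normalised to a 0-1000 page coordinate space. A common
# preprocessing helper (the pixel boxes it would receive are hypothetical):
def normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]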
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def __A ( _SCREAMING_SNAKE_CASE : List[str]="ro" , _SCREAMING_SNAKE_CASE : Dict="en" , _SCREAMING_SNAKE_CASE : int="wmt16" , _SCREAMING_SNAKE_CASE : str=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
__SCREAMING_SNAKE_CASE : List[Any] = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
__SCREAMING_SNAKE_CASE : str = datasets.load_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if save_dir is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = f'{dataset}-{pair}'
__SCREAMING_SNAKE_CASE : Dict = Path(_SCREAMING_SNAKE_CASE )
save_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
__SCREAMING_SNAKE_CASE : Optional[Any] = "val" if split == "validation" else split
__SCREAMING_SNAKE_CASE : Optional[int] = save_dir.joinpath(f'{fn}.source' )
__SCREAMING_SNAKE_CASE : Optional[int] = save_dir.joinpath(f'{fn}.target' )
__SCREAMING_SNAKE_CASE : Optional[Any] = src_path.open("w+" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__SCREAMING_SNAKE_CASE : int = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
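# Example invocations via python-fire (script name and dataset are illustrative):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#     python download_wmt.py ro en wmt16 --save_dir wmt16-ro-en
#
# Either form writes train/val/test .source and .target files into save_dir.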
|
"""simple docstring"""
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
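# Round-trip sanity check: original_text inverts cipher_text because
# (x - k) % 26 followed by (x + k) % 26 is the identity (message assumed
# upper-case, as in main()):
def round_trip_demo() -> None:
    msg = "HELLO WORLD"
    key_new = generate_key(msg, "KEY")
    assert original_text(cipher_text(msg, key_new), key_new) == msg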
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
|
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
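# Usage sketch (hypothetical prompt/choices; `run()` blocks until the user selects):
#   menu = BulletMenu("Pick a framework:", ["pytorch", "tensorflow", "jax"])
#   selected_index = menu.run(default_choice=0)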
| 252
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the format expected by finetune.py."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
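# Example invocation (a sketch; assumes this file is saved as download_wmt.py):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en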
| 373
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
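# Usage note (a sketch): with the lazy module wired up above,
# `from transformers.models.mobilevit import MobileViTConfig` resolves through
# `_import_structure` without eagerly importing torch or TensorFlow.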
| 214
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
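# Quick sanity check (a sketch using the classes defined above):
#   cfg = ViTConfig()
#   assert cfg.hidden_size == 768 and cfg.model_type == "vit"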
| 285
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
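# Sketch: `attribute_map` above aliases BERT-style names onto the GPT-style ones, so
#   cfg = TrajectoryTransformerConfig()
#   assert cfg.hidden_size == cfg.n_embd == 128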
| 152
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if `color` is not used by any already-colored neighbour."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
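# Demo (a minimal sketch): 3-color a small graph given as an adjacency matrix.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # [0, 1, 0, 1, 0]; an empty list would mean "not colorable"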
| 489
| 0
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX Inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
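# Usage sketch (hypothetical repo id; needs `onnxruntime` and hub access):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))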
| 718
|
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum over contiguous subarrays in O(n)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_a: Any = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
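    # All-negative input (a quick sketch): allowing the empty subarray makes 0 the best sum.
    print(max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True))  # 0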
| 268
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
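# Usage sketch (downloads from the Hugging Face Hub; assumes network access):
#   tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tok("Hello world")  # note `padding_side = "left"` above: padding is prepended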
| 242
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
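# Typical wiring (a sketch; `MyTaskTransformer` is a hypothetical BaseTransformer subclass):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskTransformer(args), args)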
| 242
| 1
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (shortest remaining time first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn-around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
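# Worked micro-example (hand-checked against calculate_waitingtime above):
#   arrival_time = [0, 0], burst_time = [5, 2]
#   SRTF runs P2 to completion first -> waiting_time = [2, 0], average waiting time = 1.0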
| 715
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on BERT and ALBERT in gluonnlp, mxnet) to our BERT structure."""
    # Original Bort configuration: 4 layers, 8 heads, 768 hidden size, 1024 embed size
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.0_2,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(__lowerCamelCase ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 468
| 0
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
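# Example invocation (a sketch; assumes this script is saved as inference_bf16.py):
#   python inference_bf16.py --dpm --steps 20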
| 58
|
"""simple docstring"""
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def lowerCamelCase_ ( UpperCAmelCase_ = "https://github.com" ) ->list[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = get_domain_name(UpperCAmelCase_ )
# Initialize the parser
__UpperCAmelCase : int = Parser(UpperCAmelCase_ )
try:
# Open URL
__UpperCAmelCase : Union[str, Any] = requests.get(UpperCAmelCase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__UpperCAmelCase : str = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__UpperCAmelCase : Optional[int] = requests.get(UpperCAmelCase_ )
# Get the valid email.
__UpperCAmelCase : Tuple = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(UpperCAmelCase_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ :List[str] = emails_from_url('https://github.com')
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
| 522
| 0
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f'[{node.key}]'.ljust(label_size, '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f'[{node.key}]'.ljust(label_size, '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward

        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return f'SkipList(level={self.level})\n' + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert('''Key1''', 3)
    skip_list.insert('''Key2''', 12)
    skip_list.insert('''Key3''', 41)
    skip_list.insert('''Key4''', -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert('''Key1''', 10)
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''Key5''', 7)
    skip_list.insert('''Key7''', 10)
    skip_list.insert('''Key10''', 5)
    skip_list.insert('''Key7''', 7)
    skip_list.insert('''Key5''', 5)
    skip_list.insert('''Key10''', 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('''Some key''') is None
def test_search():
    skip_list = SkipList()
    skip_list.insert('''Key2''', 20)
    assert skip_list.find('''Key2''') == 20
    skip_list.insert('''Some Key''', 10)
    skip_list.insert('''Key2''', 8)
    skip_list.insert('''V''', 13)
    assert skip_list.find('''Y''') is None
    assert skip_list.find('''Key2''') == 8
    assert skip_list.find('''Some Key''') == 10
    assert skip_list.find('''V''') == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('''Some key''')
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 14)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''V''')
    skip_list.delete('''Key2''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''Key2''') is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 14)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''V''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') == 14
    assert skip_list.find('''Key1''') == 12
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''X''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') == 12
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''Key1''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') is None
    assert skip_list.find('''Key2''') == 15
    skip_list.delete('''Key2''')
    assert skip_list.find('''V''') is None
    assert skip_list.find('''X''') is None
    assert skip_list.find('''Key1''') is None
    assert skip_list.find('''Key2''') is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert('''Key1''', 12)
    skip_list.insert('''V''', 13)
    skip_list.insert('''X''', 142)
    skip_list.insert('''Key2''', 15)
    skip_list.delete('''X''')

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, '''2''')
    skip_list.insert(4, '''4''')
    skip_list.insert(6, '''4''')
    skip_list.insert(4, '''5''')
    skip_list.insert(8, '''4''')
    skip_list.insert(9, '''4''')
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
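A quick usage sketch, illustrative and using only the API defined above: the skip list behaves as a sorted key-value map with expected O(log n) search, insert, and delete.

def demo_usage():
    demo = SkipList()
    for key, value in [(3, "three"), (1, "one"), (2, "two")]:
        demo.insert(key, value)
    assert demo.find(2) == "two"
    assert list(demo) == [1, 2, 3]  # iteration yields keys in sorted order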
| 60
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
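A minimal usage sketch (illustrative): instantiate the config and inspect the dynamic ONNX axes the export config declares.

if __name__ == "__main__":
    config = CamembertConfig(vocab_size=32_005)  # illustrative vocab size
    onnx_config = CamembertOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict mapping input names to their dynamic axes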
| 196
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ) -> None:
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_center_crop"""))
        self.assertTrue(hasattr(image_processing, """center_crop"""))
        self.assertTrue(hasattr(image_processing, """do_flip_channel_order"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 20})
        self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ),
        )
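A short aside on the do_flip_channel_order option exercised above (illustrative, not part of the test file): the processor reverses the channel axis, turning RGB into the BGR order MobileViT checkpoints were trained with.

def demo_channel_flip():
    rgb = np.arange(2 * 2 * 3).reshape(2, 2, 3)  # tiny HWC image
    bgr = rgb[..., ::-1]  # equivalent channel-order flip
    assert (bgr[..., 0] == rgb[..., 2]).all()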
| 196
| 1
|
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            msg = (
                f"""Expected a_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(a_coeffs)}"""
            )
            raise ValueError(msg)

        if len(b_coeffs) != self.order + 1:
            msg = (
                f"""Expected b_coeffs to have {self.order + 1} elements """
                f"""for {self.order}-order filter, got {len(b_coeffs)}"""
            )
            raise ValueError(msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
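The class implements the direct-form difference equation y[n] = (b0*x[n] + sum over i>=1 of (b_i*x[n-i] - a_i*y[n-i])) / a0. A minimal usage sketch (illustrative): with pass-through coefficients the filter is the identity.

if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])  # identity filter
    print([filt.process(s) for s in [0.0, 1.0, 0.5, -0.5]])  # -> [0.0, 1.0, 0.5, -0.5]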
| 708
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args) -> None:
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('''/''')
    target_model_path = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, '''pytorch_model.bin'''))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            else:
                raise ValueError('''Unknown pruning method''')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"""bertarized_{os.path.basename(model_name_or_path)}""")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"""\nCreated folder {target_model_path}""")

    torch.save(pruned_model, os.path.join(target_model_path, '''pytorch_model.bin'''))
    print('''\nPruned model saved! See you later!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
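A worked sketch of the l0 branch above with illustrative score values: the deterministic hard-concrete mask stretches a sigmoid of the scores from (0, 1) to (l, r) = (-0.1, 1.1) and clamps back into [0, 1], so strongly negative scores gate weights to 0 and strongly positive scores keep them at 1.

def demo_l0_mask():
    scores = torch.tensor([-3.0, 0.0, 3.0])
    l, r = -0.1, 1.1
    mask = (torch.sigmoid(scores) * (r - l) + l).clamp(min=0.0, max=1.0)
    print(mask)  # approximately [0.0, 0.5, 1.0]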
| 312
| 0
|
import os
SYMBOLS = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string into its integer value."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman-numeral form of an integer."""
    numerals = """"""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the number of characters saved by rewriting the file's numerals in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 108
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["""input_ids"""].shape[-1] // 2
        input_ids = inputs["""input_ids"""][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 0
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCamelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = pt_model_class(lowerCamelCase_ ).eval()
UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCamelCase_ , flax_model.params )
UpperCamelCase = flax_model.generate(lowerCamelCase_ ).sequences
UpperCamelCase = pt_model.generate(torch.tensor(lowerCamelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCamelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def test_greedy_generate(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = True
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_num_return_sequences(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = False
UpperCamelCase = max_length
UpperCamelCase = 2
UpperCamelCase = 2
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def test_sample_generate_logits_warper(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = True
UpperCamelCase = max_length
UpperCamelCase = 0.8
UpperCamelCase = 10
UpperCamelCase = 0.3
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_logits_warper(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = max_length
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_logits_warper(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
UpperCamelCase = max_length
UpperCamelCase = 2
UpperCamelCase = 1
UpperCamelCase = 8
UpperCamelCase = 9
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_attn_mask(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = False
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate_attn_mask(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = True
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_attn_mask(self):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCamelCase = attention_mask.at[(0, 0)].set(0 )
UpperCamelCase = 2
UpperCamelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
UpperCamelCase = jit(model.generate )
UpperCamelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""")
        model = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""")

        encoder_input_str = """Hello world"""
        input_ids = tokenizer(encoder_input_str, return_tensors="""np""").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, """do_samples"""):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, """foo"""):
            fake_model_kwargs = {"""foo""": """bar"""}
            model.generate(input_ids, **fake_model_kwargs)
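A condensed sketch (illustrative) of the equivalence the mixin above asserts: a jit-compiled generate must produce the same sequences as the eager call. The model and inputs are supplied by the caller; any Flax generative model prepared as in the tests would do.

def demo_jit_matches_eager(model, input_ids):
    eager = model.generate(input_ids).sequences
    jitted = jit(model.generate)(input_ids).sequences
    assert eager.tolist() == jitted.tolist()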
| 537
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Tuple = DummyModel()
__A : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : str = dummy_dataloaders()
__A : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=__UpperCAmelCase , automatic_checkpoint_naming=__UpperCAmelCase )
# Train baseline
__A : Dict = Accelerator(project_config=__UpperCAmelCase )
__A , __A , __A , __A : Optional[int] = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def test_can_resume_training_with_folder(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Optional[int] = DummyModel()
__A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : Dict = dummy_dataloaders()
# Train baseline
__A : List[str] = Accelerator()
__A , __A , __A , __A : List[str] = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save initial
__A : Dict = os.path.join(__UpperCAmelCase , "initial" )
accelerator.save_state(__UpperCAmelCase )
((__A) , (__A)) : int = model.a.item(), model.b.item()
__A : Any = optimizer.state_dict()
__A : List[Any] = train(3 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
((__A) , (__A)) : List[Any] = model.a.item(), model.b.item()
__A : int = optimizer.state_dict()
# Train partially
set_seed(42 )
__A : Optional[Any] = DummyModel()
__A : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : int = dummy_dataloaders()
__A : Optional[Any] = Accelerator()
__A , __A , __A , __A : Dict = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.load_state(__UpperCAmelCase )
((__A) , (__A)) : List[Any] = model.a.item(), model.b.item()
__A : Any = optimizer.state_dict()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
__A : Any = train(2 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save everything
__A : Dict = os.path.join(__UpperCAmelCase , "checkpoint" )
accelerator.save_state(__UpperCAmelCase )
# Load everything back in and make sure all states work
accelerator.load_state(__UpperCAmelCase )
test_rands += train(1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
((__A) , (__A)) : Union[str, Any] = model.a.item(), model.b.item()
__A : Optional[int] = optimizer.state_dict()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
    def test_can_resume_training(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Any = DummyModel()
__A : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : List[Any] = dummy_dataloaders()
__A : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__UpperCAmelCase )
# Train baseline
__A : Any = Accelerator(project_dir=__UpperCAmelCase , project_config=__UpperCAmelCase )
__A , __A , __A , __A : int = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save initial
accelerator.save_state()
((__A) , (__A)) : Tuple = model.a.item(), model.b.item()
__A : Union[str, Any] = optimizer.state_dict()
__A : Union[str, Any] = train(3 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
((__A) , (__A)) : Optional[int] = model.a.item(), model.b.item()
__A : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(42 )
__A : Optional[Any] = DummyModel()
__A : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A , __A : Tuple = dummy_dataloaders()
__A : Union[str, Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=__UpperCAmelCase )
__A : List[str] = Accelerator(project_dir=__UpperCAmelCase , project_config=__UpperCAmelCase )
__A , __A , __A , __A : str = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.load_state(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_0" ) )
((__A) , (__A)) : Optional[Any] = model.a.item(), model.b.item()
__A : Optional[Any] = optimizer.state_dict()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
__A : Tuple = train(2 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_1" ) )
test_rands += train(1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
((__A) , (__A)) : List[str] = model.a.item(), model.b.item()
__A : str = optimizer.state_dict()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
    def test_invalid_registration(self):
__A : Optional[int] = torch.tensor([1, 2, 3] )
__A : List[Any] = torch.tensor([2, 3, 4] )
__A : List[str] = DummyModel()
__A : List[str] = torch.optim.Adam(net.parameters() )
__A : Union[str, Any] = Accelerator()
with self.assertRaises(__UpperCAmelCase ) as ve:
accelerator.register_for_checkpointing(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__A : Tuple = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
    def test_with_scheduler(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Any = DummyModel()
__A : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__A : Tuple = torch.optim.lr_scheduler.StepLR(__UpperCAmelCase , step_size=1 , gamma=0.99 )
__A , __A : str = dummy_dataloaders()
__A : List[str] = ProjectConfiguration(automatic_checkpoint_naming=__UpperCAmelCase )
# Train baseline
__A : Optional[int] = Accelerator(project_dir=__UpperCAmelCase , project_config=__UpperCAmelCase )
__A , __A , __A , __A , __A : Any = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save initial
accelerator.save_state()
__A : List[str] = scheduler.state_dict()
train(3 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertNotEqual(__UpperCAmelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_0" ) )
self.assertEqual(__UpperCAmelCase , scheduler.state_dict() )
    def test_checkpoint_deletion(self):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__A : Any = DummyModel()
__A : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=__UpperCAmelCase , total_limit=2 )
# Train baseline
__A : int = Accelerator(project_dir=__UpperCAmelCase , project_config=__UpperCAmelCase )
__A : List[Any] = accelerator.prepare(__UpperCAmelCase )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , "checkpoints" , "checkpoint_10" ) ) )
@require_cuda
    def test_map_location(self):
__A : Union[str, Any] = ["torchrun", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
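A condensed sketch (illustrative) of the save/load round trip the tests above exercise: parameters saved with save_state are restored exactly by load_state.

def demo_checkpoint_roundtrip(tmpdir: str):
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
    model = accelerator.prepare(DummyModel())
    accelerator.save_state()  # -> tmpdir/checkpoints/checkpoint_0
    before = (model.a.item(), model.b.item())
    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
    assert (model.a.item(), model.b.item()) == before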
| 387
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
__A : Optional[Any] = self.get_tokenizer()
__A : Optional[int] = self.get_feature_extractor()
__A : Union[str, Any] = self.get_decoder()
__A : int = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__A : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCAmelCase )
    def test_save_load_pretrained_additional_features(self):
__A : List[str] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__A : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
    def test_load_decoder_tokenizer_mismatch_content(self):
__A : Dict = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__UpperCAmelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
    def test_feature_extractor(self):
__A : Any = self.get_feature_extractor()
__A : List[Any] = self.get_tokenizer()
__A : Any = self.get_decoder()
__A : Dict = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
__A : Optional[int] = floats_list((3, 1_000) )
__A : List[Any] = feature_extractor(__UpperCAmelCase , return_tensors="np" )
__A : int = processor(__UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
__A : Tuple = self.get_feature_extractor()
__A : Tuple = self.get_tokenizer()
__A : Dict = self.get_decoder()
__A : int = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
__A : Tuple = "This is a test string"
__A : Union[str, Any] = processor(text=__UpperCAmelCase )
__A : Union[str, Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
__A : Dict = self.get_feature_extractor()
__A : Dict = self.get_tokenizer()
__A : Tuple = self.get_decoder()
__A : Any = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
__A : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__A : str = processor.decode(__UpperCAmelCase )
__A : Dict = decoder.decode_beams(__UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
    def test_decoder_batch(self, pool_context):
__A : int = self.get_feature_extractor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = self.get_decoder()
__A : List[Any] = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
__A : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__A : List[Any] = processor.batch_decode(__UpperCAmelCase )
else:
with get_context(__UpperCAmelCase ).Pool() as pool:
__A : Tuple = processor.batch_decode(__UpperCAmelCase , __UpperCAmelCase )
__A : List[str] = list(__UpperCAmelCase )
with get_context("fork" ).Pool() as p:
__A : int = decoder.decode_beams_batch(__UpperCAmelCase , __UpperCAmelCase )
__A , __A , __A : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCAmelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCAmelCase , decoded_processor.lm_score )
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        with get_context("fork" ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores_decoder = [d[0][2] for d in decoded_decoder_out]
        lm_scores_decoder = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder , decoded_processor )
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , decoded_processor )
        self.assertTrue(np.array_equal(logit_scores_decoder , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , logit_scores_decoder , atol=1e-3 ) )
        self.assertTrue(np.array_equal(lm_scores_decoder , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , lm_scores_decoder , atol=1e-3 ) )
def __UpperCAmelCase( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits , alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        decoder.reset_params(
            alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        with get_context("fork" ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder , decoded_processor )
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , decoded_processor )
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , True )
def __UpperCAmelCase( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir )
        expected_decoder_files = ["alphabet.json", "language_model"]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files , expected_decoder_files )
def __UpperCAmelCase( self ):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm" )
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir )
        expected_decoder_files = os.listdir(path_to_cached_dir )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files , expected_decoder_files )
def __UpperCAmelCase( self ):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
        raw_speech = floats_list((3, 1_000) )
        input_wavaveca = processor_wavaveca(raw_speech , return_tensors="np" )
        input_auto = processor_auto(raw_speech , return_tensors="np" )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits )
        decoded_auto = processor_auto.batch_decode(logits )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __UpperCAmelCase( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def __UpperCAmelCase( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
        self.assertTrue(isinstance(outputs , dict ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def __UpperCAmelCase( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
        self.assertTrue(isinstance(outputs , dict ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__UpperCAmelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __UpperCAmelCase( self ):
import torch
        ds = load_dataset("common_voice" , "en" , split="train" , streaming=True )
        ds = ds.cast_column("audio" , datasets.Audio(sampling_rate=16_000 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_offsets = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        EXPECTED_OUTPUT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , EXPECTED_OUTPUT )
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets , "start_time" ) )
        end_times = torch.tensor(self.get_from_offsets(word_offsets , "end_time" ) )
        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_times , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_times , atol=0.01 ) )
| 387
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
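# TimmBackbone wraps a timm feature extractor behind the transformers backbone API so that
# vision models can consume timm checkpoints through a common interface.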
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _lowercase ( PreTrainedModel , BackboneMixin ):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__( self , config , **kwargs ):
        requires_backends(self , """timm""" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(F"backbone {config.backbone} is not supported by timm." )
        if hasattr(config , """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        pretrained = getattr(config , """use_pretrained_backbone""" , None )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , """out_indices""" , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        requires_backends(cls , ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("""config""" , TimmBackboneConfig() )
        use_timm = kwargs.pop("""use_timm_backbone""" , True )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        num_channels = kwargs.pop("""num_channels""" , config.num_channels )
        features_only = kwargs.pop("""features_only""" , config.features_only )
        use_pretrained_backbone = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
        out_indices = kwargs.pop("""out_indices""" , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
| 385
|
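# An isogram is a word in which no letter occurs more than once; comparing the sorted
# letters against their set detects any duplicate.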
def is_isogram( string : str ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError('''String must only contain alphabetic characters.''' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
a_ = input("""Enter a string """).strip()
a_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 175
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ : str = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
UpperCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 446
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a__ ( TestCase ):
"""simple docstring"""
    def setUp( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self : Optional[Any] ) ->DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_dpr_ctx_encoder_tokenizer( self : Optional[Any] ) ->DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self : Optional[int] ) ->BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self : Any ) ->List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
        dataset = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""text""": ["""foo""", """bar"""],
"""title""": ["""Foo""", """Bar"""],
"""embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever( self : List[Any] , from_disk : bool ) ->Union[str, Any]:
        """simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , """dataset""" )
            config.index_path = os.path.join(self.tmpdirname , """index.faiss""" )
            dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) )
            dataset.drop_index("""embeddings""" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
    def get_dummy_legacy_index_retriever( self : int ) ->int:
"""simple docstring"""
        dataset = Dataset.from_dict(
            {
                """id""": ["""0""", """1"""],
                """text""": ["""foo""", """bar"""],
                """title""": ["""Foo""", """Bar"""],
                """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" )
        dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" )
        pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) )
        passages_file = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" )
        passages = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
        pickle.dump(passages , open(passages_file , """wb""" ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
def _lowercase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase ( self : List[str] ) ->int:
"""simple docstring"""
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
                self.assertIsInstance(retriever , RagRetriever )
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
                out = retriever.retrieve(hidden_states , n_docs=1 )
                self.assertTrue(out is not None )
def _lowercase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
def _lowercase ( self : Dict ) ->Tuple:
"""simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""id"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
def _lowercase ( self : Tuple ) ->str:
"""simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
        self.assertEqual(len(doc_dicts[0]["""text"""] ) , n_docs )
        self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase ( self : Any ) ->str:
"""simple docstring"""
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase ( self : Tuple ) ->Tuple:
"""simple docstring"""
import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [1_0, 1_1]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids , context_attention_mask , retrieved_doc_embeds = (
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors="""pt""" , )
        context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = (  # noqa: F841
            out["""context_input_ids"""],
            out["""context_attention_mask"""],
            out["""retrieved_doc_embeds"""],
            out["""doc_ids"""],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
        ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer )
        question_input_ids = [[5, 7], [1_0, 1_1]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        self.assertEqual(
            len(out ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , True )  # check for doc token related keys in dictionary.
| 446
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
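# get_tfds builds tf.data train/validation/test pipelines from the given CSV files and
# returns them together with the label -> id mapping used by the classification head.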
def get_tfds (train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int64 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    label_column_id : int = field(metadata={'help': 'Which column contains the label'} )
    train_file : Optional[str] = field(default=None ,metadata={'help': 'The path of the training file'} )
    dev_file : Optional[str] = field(default=None ,metadata={'help': 'The path of the development file'} )
    test_file : Optional[str] = field(default=None ,metadata={'help': 'The path of the test file'} )
    max_seq_length : int = field(
        default=128 ,metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } ,)
    overwrite_cache : bool = field(
        default=False ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class ModelArguments :
    model_name_or_path : str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name : Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name : Optional[str] = field(
        default=None ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    use_fast : bool = field(default=False ,metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
def main ():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(labelaid ) , label2id=labelaid , id2label={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f' {key} = {value}' )
                writer.write(f'{key} = {value}\n' )
        results.update(result )
return results
if __name__ == "__main__":
main()
| 22
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowercase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
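    # attribute_map lets callers read the generic hidden_size / num_attention_heads /
    # num_hidden_layers names while the config stores DistilBERT's native dim / n_heads / n_layers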
    def __init__(self ,vocab_size=30522 ,max_position_embeddings=512 ,sinusoidal_pos_embds=False ,n_layers=6 ,n_heads=12 ,dim=768 ,hidden_dim=4 * 768 ,dropout=0.1 ,attention_dropout=0.1 ,activation="gelu" ,initializer_range=0.02 ,qa_dropout=0.1 ,seq_classif_dropout=0.2 ,pad_token_id=0 ,**kwargs ,) -> Tuple:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs ,pad_token_id=pad_token_id )
class __lowercase ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 502
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['onnx']
    def __init__(self, *args, **kwargs ):
        '''simple docstring'''
        requires_backends(self, ['onnx'] )
    @classmethod
    def a__ (cls, *args, **kwargs ):
        '''simple docstring'''
        requires_backends(cls, ['onnx'] )
    @classmethod
    def a__ (cls, *args, **kwargs ):
        '''simple docstring'''
        requires_backends(cls, ['onnx'] )
| 714
|
"""simple docstring"""
def lowerCamelCase_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 696
| 0
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def a (lowerCAmelCase__ ):
    repo = git.Repo(search_parent_directories=lowerCAmelCase__ )
    repo_infos = {
        """repo_id""": str(lowerCAmelCase__ ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(lowerCAmelCase__ , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
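# init_gpu_params derives world size, node/global/local ranks and the master flag from the
# environment variables set by the distributed launcher, then pins each process to its GPU.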
def a (params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("""Initializing GPUs""" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""] )
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""] )
        params.global_rank = int(os.environ["""RANK"""] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["""N_NODES"""] )
        assert params.node_id == int(os.environ["""NODE_RANK"""] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def a (args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 99
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : str = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type ="""decision_transformer"""
    keys_to_ignore_at_inference =["""past_key_values"""]
    attribute_map ={
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self, state_dim=17, act_dim=4, hidden_size=1_28, max_ep_len=40_96, action_tanh=True, vocab_size=1, n_positions=10_24, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=5_02_56, eos_token_id=5_02_56, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ) -> str:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 214
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
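# The helper lookups below pull individual layer tensors out of the flattened T5X parameter
# dictionary; `i` is the layer index and `prefix` is "encoder" or "decoder".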
def tax_relpos_bias_lookup ( params , i , prefix ):
    """simple docstring"""
    return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch ( variables :dict , *, num_layers :int , is_encoder_only :bool , scalable_attention :bool = False ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""" )
        k , o , q , v = tax_attention_lookup(old , i , """encoder""" , """attention""" )
        new[f'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[f'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[f'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""" )
        wi , wo = tax_mlp_lookup(old , i , """encoder""" , split_mlp_wi )
        new[f'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[f'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[f'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f'''encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
                old , i , """encoder""" ).T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """encoder""" ).T
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""" )
            k , o , q , v = tax_attention_lookup(old , i , """decoder""" , """self_attention""" )
            new[f'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""" )
            k , o , q , v = tax_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""" )
            new[f'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[f'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""" )
            wi , wo = tax_mlp_lookup(old , i , """decoder""" , split_mlp_wi )
            new[f'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[f'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[f'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f'''decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(old , i , """decoder""" ).T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if """decoder/logits_dense/kernel""" in old:
            new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict ( converted_params , is_encoder_only :bool ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__SCREAMING_SNAKE_CASE = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__SCREAMING_SNAKE_CASE = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
__SCREAMING_SNAKE_CASE = state_dict["shared.weight"]
return state_dict
def load_tax_weights_in_ta ( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only :bool = False , scalable_attention :bool = False , ):
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
    '--is_encoder_only', action='store_true', help='Whether the model is an encoder-only model.', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
_snake_case : Any = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
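# Example invocation (illustrative; the script name and all file paths are
# placeholders):
#   python <this_script>.py --t5x_checkpoint_path /path/to/t5x_ckpt \
#       --config_file /path/to/config.json --pytorch_dump_path /path/to/out \
#       [--is_encoder_only] [--scalable_attention]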
| 214
| 1
|
from math import ceil
def solution( n :int = 1_0_0_1 ) -> int:
    """simple docstring"""
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
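# Why the ring formula works (illustrative): the spiral ring with side length
# odd = 2*i + 1 has corner values odd**2, odd**2 - (odd - 1),
# odd**2 - 2*(odd - 1) and odd**2 - 3*(odd - 1), which sum to
# 4*odd**2 - 6*(odd - 1), and even = 2*i is exactly odd - 1. For a 5x5 spiral
# the diagonals hold 1, 3, 5, 7, 9, 13, 17, 21, 25, so solution(5) == 101.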
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 176
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
A : Optional[Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
A : Any = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
A : str = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
A : Optional[Any] = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
A : Any = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
A : str = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def __lowerCamelCase ( __a :List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , __a )
return [m.group(0 ) for m in matches]
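# Illustrative: the lazy regex above splits on case boundaries, e.g. the
# function maps "TFBertModel" -> ["TF", "Bert", "Model"], which is what lets
# the backend lookup below strip one trailing word at a time.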
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
A__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A__ = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
A__ = collections.defaultdict(__a )
A__ = collections.defaultdict(__a )
A__ = collections.defaultdict(__a )
    # Let's look through all transformers objects (once) and find if models are supported by a given backend.
for attr_name in dir(__a ):
A__ = None
if _re_tf_models.match(__a ) is not None:
A__ = tf_models
A__ = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
A__ = flax_models
A__ = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
A__ = pt_models
A__ = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_prefix_to_model_type:
A__ = True
break
# Try again after removing the last word in the name
A__ = """""".join(camel_case_split(__a )[:-1] )
A__ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
A__ = list(__a )
all_models.sort()
A__ = {"""model_type""": all_models}
A__ = [pt_models[t] for t in all_models]
A__ = [tf_models[t] for t in all_models]
A__ = [flax_models[t] for t in all_models]
    # Now let's find the right processing class for each model: first a Processor, then a Tokenizer, then a FeatureExtractor.
A__ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
A__ = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
A__ = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
A__ = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
A__ = """AutoTokenizer"""
A__ = [processors[t] for t in all_models]
return pd.DataFrame(__a )
def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
A__ = [model_mapping, F'TF_{model_mapping}', F'FLAX_{model_mapping}']
A__ = [auto_class, F'TF_{auto_class}', F'Flax_{auto_class}']
# Loop through all three frameworks
for module, cls, mapping in zip(__a , __a , __a ):
# The type of pipeline may not exist in this framework
if not hasattr(__a , __a ):
continue
# First extract all model_names
A__ = []
for name in getattr(__a , __a ).values():
if isinstance(__a , __a ):
model_names.append(__a )
else:
model_names.extend(list(__a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __lowerCamelCase ( __a :Dict , __a :Optional[Any] ) -> Any:
"""simple docstring"""
A__ = get_frameworks_table()
A__ = Dataset.from_pandas(__a )
A__ = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=__a )
A__ = Dataset.from_json(__a )
A__ = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(__a ) )
}
A__ = update_pipeline_and_auto_class_table(__a )
    # Sort the model classes so that nondeterministic ordering doesn't create spurious update commits.
A__ = sorted(table.keys() )
A__ = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
A__ = Dataset.from_pandas(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__a , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(__a , """pipeline_tags.json""" ) )
if commit_sha is not None:
A__ = (
F'Update with commit {commit_sha}\n\nSee: '
F'https://github.com/huggingface/transformers/commit/{commit_sha}'
)
else:
A__ = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=__a , repo_type="""dataset""" , token=__a , commit_message=__a , )
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
A__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
A__ = transformers_module.pipelines.SUPPORTED_TASKS
A__ = []
for key in pipeline_tasks:
if key not in in_table:
A__ = pipeline_tasks[key]["""pt"""]
if isinstance(__a , (list, tuple) ):
A__ = model[0]
A__ = model.__name__
if model not in in_table.values():
missing.append(__a )
if len(__a ) > 0:
A__ = """, """.join(__a )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F'`utils/update_metadata.py`: {msg}. Please add them!' )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
A : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
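# Typical invocations (illustrative; the token and sha values are placeholders):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only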
| 176
| 1
|
"""simple docstring"""
import pprint
import requests
UpperCAmelCase ="https://zenquotes.io/api"
def quote_of_the_day():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
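# Both helpers return the decoded JSON payload from zenquotes.io, which
# typically looks like [{"q": <quote>, "a": <author>, "h": <html>}], a
# one-element list (illustrative; the exact shape is defined by the external
# API, not by this file).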
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 255
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = '''umt5'''
_lowerCamelCase = ['''past_key_values''']
def __init__( self ,lowerCamelCase_=2_5_0_1_1_2 ,lowerCamelCase_=5_1_2 ,lowerCamelCase_=6_4 ,lowerCamelCase_=1_0_2_4 ,lowerCamelCase_=8 ,lowerCamelCase_=None ,lowerCamelCase_=6 ,lowerCamelCase_=3_2 ,lowerCamelCase_=1_2_8 ,lowerCamelCase_=0.1 ,lowerCamelCase_=1E-6 ,lowerCamelCase_=1.0 ,lowerCamelCase_="gated-gelu" ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_="T5Tokenizer" ,lowerCamelCase_=True ,lowerCamelCase_=0 ,lowerCamelCase_=1 ,lowerCamelCase_=0 ,**lowerCamelCase_ ,) -> Dict:
super().__init__(
is_encoder_decoder=lowerCamelCase_ ,tokenizer_class=lowerCamelCase_ ,tie_word_embeddings=lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ ,decoder_start_token_id=lowerCamelCase_ ,**lowerCamelCase_ ,)
A = vocab_size
A = d_model
A = d_kv
A = d_ff
A = num_layers
A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A = num_heads
A = relative_attention_num_buckets
A = relative_attention_max_distance
A = dropout_rate
A = layer_norm_epsilon
A = initializer_factor
A = feed_forward_proj
A = use_cache
A = self.feed_forward_proj.split("""-""" )
A = act_info[-1]
A = act_info[0] == """gated"""
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
A = """gelu_new"""
@property
def UpperCamelCase__ ( self ) -> Dict:
return self.d_model
@property
def UpperCamelCase__ ( self ) -> Any:
return self.num_heads
@property
def UpperCamelCase__ ( self ) -> int:
return self.num_layers
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
A = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
A = """past_encoder_sequence + sequence"""
A = {0: """batch"""}
A = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A = {0: """batch""", 1: """decoder_sequence"""}
A = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase_ ,direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCamelCase__ ( self ) -> int:
return 1_3
@property
def UpperCamelCase__ ( self ) -> float:
return 5E-4
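# How the activation string above is parsed (illustrative):
# "gated-gelu".split("-") yields ["gated", "gelu"], so the dense activation
# becomes "gelu", the gated flag becomes True, and "gated-gelu" itself is then
# remapped to "gelu_new"; a bare value such as "relu" yields an ungated "relu".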
| 255
| 1
|
import numpy as np
def exponential_linear_unit ( vector : np.ndarray , alpha : float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
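# Quick, illustrative sanity check (the helper name is hypothetical, not part
# of the original file): positive inputs pass through unchanged, negative
# inputs saturate towards -alpha.
def _elu_example() -> None:
    out = exponential_linear_unit(np.array([-1.0, 0.0, 1.0]), 1.0)
    assert np.allclose(out, [-0.63212056, 0.0, 1.0])  # exp(-1) - 1 is about -0.6321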
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case : str = logging.get_logger(__name__)
snake_case : List[str] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = '''deformable_detr'''
UpperCAmelCase__ : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self :Optional[int] ,__snake_case :List[Any]=True ,__snake_case :str=None ,__snake_case :Optional[Any]=3 ,__snake_case :int=3_00 ,__snake_case :Optional[int]=10_24 ,__snake_case :Union[str, Any]=6 ,__snake_case :Optional[int]=10_24 ,__snake_case :List[str]=8 ,__snake_case :Optional[Any]=6 ,__snake_case :int=10_24 ,__snake_case :List[str]=8 ,__snake_case :List[str]=0.0 ,__snake_case :Optional[int]=True ,__snake_case :Any="relu" ,__snake_case :List[str]=2_56 ,__snake_case :List[str]=0.1 ,__snake_case :Dict=0.0 ,__snake_case :Optional[int]=0.0 ,__snake_case :List[Any]=0.02 ,__snake_case :Union[str, Any]=1.0 ,__snake_case :List[str]=True ,__snake_case :Union[str, Any]=False ,__snake_case :List[Any]="sine" ,__snake_case :Tuple="resnet50" ,__snake_case :Dict=True ,__snake_case :Tuple=False ,__snake_case :str=4 ,__snake_case :Union[str, Any]=4 ,__snake_case :List[Any]=4 ,__snake_case :Optional[Any]=False ,__snake_case :str=3_00 ,__snake_case :Tuple=False ,__snake_case :Union[str, Any]=1 ,__snake_case :str=5 ,__snake_case :str=2 ,__snake_case :Dict=1 ,__snake_case :Any=1 ,__snake_case :Union[str, Any]=5 ,__snake_case :Tuple=2 ,__snake_case :Any=0.1 ,__snake_case :str=0.25 ,__snake_case :int=False ,**__snake_case :Optional[int] ,) -> Tuple:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
a__ = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__snake_case ,__snake_case ):
a__ = backbone_config.get('model_type' )
a__ = CONFIG_MAPPING[backbone_model_type]
a__ = config_class.from_dict(__snake_case )
a__ = use_timm_backbone
a__ = backbone_config
a__ = num_channels
a__ = num_queries
a__ = max_position_embeddings
a__ = d_model
a__ = encoder_ffn_dim
a__ = encoder_layers
a__ = encoder_attention_heads
a__ = decoder_ffn_dim
a__ = decoder_layers
a__ = decoder_attention_heads
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = activation_function
a__ = init_std
a__ = init_xavier_std
a__ = encoder_layerdrop
a__ = auxiliary_loss
a__ = position_embedding_type
a__ = backbone
a__ = use_pretrained_backbone
a__ = dilation
# deformable attributes
a__ = num_feature_levels
a__ = encoder_n_points
a__ = decoder_n_points
a__ = two_stage
a__ = two_stage_num_proposals
a__ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
a__ = class_cost
a__ = bbox_cost
a__ = giou_cost
# Loss coefficients
a__ = mask_loss_coefficient
a__ = dice_loss_coefficient
a__ = bbox_loss_coefficient
a__ = giou_loss_coefficient
a__ = eos_coefficient
a__ = focal_alpha
a__ = disable_custom_kernels
super().__init__(is_encoder_decoder=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Dict ) -> int:
return self.encoder_attention_heads
@property
def lowerCamelCase__( self :int ) -> int:
return self.d_model
def lowerCamelCase__( self :List[str] ) -> str:
a__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a__ = self.backbone_config.to_dict()
a__ = self.__class__.model_type
return output
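# Illustrative usage (the class is DeformableDetrConfig upstream; the override
# values here are hypothetical):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   config.num_attention_heads  # resolves to encoder_attention_heads (8 by
#                               # default) through the attribute_map above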
| 335
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 4_2
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
@register_to_config
def __init__( self , _lowerCAmelCase = 3 , _lowerCAmelCase = 3 , _lowerCAmelCase = ("DownEncoderBlock2D",) , _lowerCAmelCase = ("UpDecoderBlock2D",) , _lowerCAmelCase = (64,) , _lowerCAmelCase = 1 , _lowerCAmelCase = "silu" , _lowerCAmelCase = 3 , _lowerCAmelCase = 32 , _lowerCAmelCase = 256 , _lowerCAmelCase = 32 , _lowerCAmelCase = None , _lowerCAmelCase = 0.18_215 , _lowerCAmelCase = "group" , ):
super().__init__()
# pass init params to Encoder
_lowerCAmelCase = Encoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , down_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , double_z=_lowerCAmelCase , )
_lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCAmelCase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
_lowerCAmelCase = VectorQuantizer(_lowerCAmelCase , _lowerCAmelCase , beta=0.25 , remap=_lowerCAmelCase , sane_index_shape=_lowerCAmelCase )
_lowerCAmelCase = nn.Convad(_lowerCAmelCase , _lowerCAmelCase , 1 )
# pass init params to Decoder
_lowerCAmelCase = Decoder(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , up_block_types=_lowerCAmelCase , block_out_channels=_lowerCAmelCase , layers_per_block=_lowerCAmelCase , act_fn=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , norm_type=_lowerCAmelCase , )
@apply_forward_hook
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = True ):
_lowerCAmelCase = self.encoder(_lowerCAmelCase )
_lowerCAmelCase = self.quant_conv(_lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCAmelCase )
@apply_forward_hook
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = True ):
# also go through quantization layer
if not force_not_quantize:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.quantize(_lowerCAmelCase )
else:
_lowerCAmelCase = h
_lowerCAmelCase = self.post_quant_conv(_lowerCAmelCase )
_lowerCAmelCase = self.decoder(_lowerCAmelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = True ):
_lowerCAmelCase = sample
_lowerCAmelCase = self.encode(_lowerCAmelCase ).latents
_lowerCAmelCase = self.decode(_lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCAmelCase )
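# Data flow (an illustrative summary of the methods above): encode() maps
# pixels through the Encoder and a 1x1 quant conv to continuous latents;
# decode() optionally snaps each latent vector to its nearest codebook entry
# (VectorQuantizer) unless force_not_quantize is set, then reconstructs pixels
# via the post-quant conv and the Decoder; forward() is simply encode followed
# by decode.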
| 702
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]:
_lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
_lowerCAmelCase = roberta.model.encoder.sentence_encoder
_lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
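# Example invocation (illustrative; the script name and all paths are
# placeholders):
#   python <this_script>.py --roberta_checkpoint_path /path/to/fairseq_ckpt \
#       --pytorch_dump_folder_path /path/to/out [--classification_head]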
| 664
| 0
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
super().__init__()
A_ : List[Any] = nn.Linear(3 , 4 )
A_ : int = nn.BatchNormad(4 )
A_ : List[Any] = nn.Linear(4 , 5 )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(lowercase ) ) )
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Dict = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase , model.state_dict() )
A_ : List[str] = os.path.join(lowercase , 'index.json' )
self.assertTrue(os.path.isfile(lowercase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
A_ : Any = os.path.join(lowercase , F'''{key}.dat''' )
self.assertTrue(os.path.isfile(lowercase ) )
# TODO: add tests on the fact weights are properly loaded
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Any = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
A_ : Tuple = torch.randn(2 , 3 , dtype=lowercase )
with TemporaryDirectory() as tmp_dir:
A_ : Any = offload_weight(lowercase , 'weight' , lowercase , {} )
A_ : Any = os.path.join(lowercase , 'weight.dat' )
self.assertTrue(os.path.isfile(lowercase ) )
self.assertDictEqual(lowercase , {'weight': {'shape': [2, 3], 'dtype': str(lowercase ).split('.' )[1]}} )
A_ : Optional[Any] = load_offloaded_weight(lowercase , index['weight'] )
self.assertTrue(torch.equal(lowercase , lowercase ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = ModelForTest()
A_ : List[Any] = model.state_dict()
A_ : int = {k: v for k, v in state_dict.items() if 'linear2' not in k}
A_ : Any = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase , lowercase )
A_ : Tuple = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase )
# Every key is there with the right value
self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase , weight_map[key] ) )
A_ : List[Any] = {k: v for k, v in state_dict.items() if 'weight' in k}
A_ : List[Any] = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase , lowercase )
A_ : Any = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase )
# Every key is there with the right value
self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(lowercase , lowercase )
# Duplicates are removed
A_ : Optional[Any] = OffloadedWeightsLoader(state_dict=lowercase , save_folder=lowercase )
# Every key is there with the right value
self.assertEqual(sorted(lowercase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(lowercase , weight_map[key] ) )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : str = {'a.1': 0, 'a.10': 1, 'a.2': 2}
A_ : Any = extract_submodules_state_dict(lowercase , ['a.1', 'a.2'] )
self.assertDictEqual(lowercase , {'a.1': 0, 'a.2': 2} )
A_ : Dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
A_ : Optional[Any] = extract_submodules_state_dict(lowercase , ['a.1', 'a.2'] )
self.assertDictEqual(lowercase , {'a.1.a': 0, 'a.2.a': 2} )
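# On-disk layout the tests above rely on (a sketch, not a spec):
#   <save_folder>/index.json    maps each tensor name to {"shape", "dtype"}
#   <save_folder>/<name>.dat    one raw, memory-mappable file per tensor
# OffloadedWeightsLoader then serves keys present in the in-memory state_dict
# directly and falls back to these files for everything else.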
| 558
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : Tuple = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.convolution(self.padding(lowercase ) )
A_ : Optional[Any] = self.normalization(lowercase )
A_ : Union[str, Any] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Dict = config.num_channels
A_ : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Dict = self.embedder(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Any = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.pooler(lowercase )
for layer_module in self.attention:
A_ : List[str] = layer_module(lowercase )
A_ : str = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : Optional[Any] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : int = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = hidden_state
for layer_module in self.layers:
A_ : Union[str, Any] = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : Optional[int] = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
A_ : str = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : str = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Any = hidden_state
for layer_module in self.layers:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Tuple = self.activation(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : int = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : List[str] = layer_module(lowercase )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = config
A_ : List[str] = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : Dict = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
A_ : int = self.embedder(lowercase , training=lowercase )
A_ : int = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : List[Any] = encoder_outputs[0]
A_ : Any = self.pooler(lowercase )
        # Change to NCHW output format to have uniformity in the modules
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : Dict = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = RegNetConfig
lowerCamelCase_ = '''regnet'''
lowerCamelCase_ = '''pixel_values'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , __A , )
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : Union[str, Any] = TFRegNetMainLayer(lowercase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : str = return_dict if return_dict is not None else self.config.use_return_dict
A_ : int = self.regnet(
pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , __A , )
class UpperCAmelCase ( __A , __A ):
'''simple docstring'''
def __init__( self , lowercase , *lowercase , **lowercase ):
"""simple docstring"""
super().__init__(lowercase , *lowercase , **lowercase )
A_ : str = config.num_labels
A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' )
# classification head
A_ : int = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ):
"""simple docstring"""
A_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Dict = self.regnet(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : Any = outputs.pooler_output if return_dict else outputs[1]
A_ : Union[str, Any] = self.classifier[0](lowercase )
A_ : Dict = self.classifier[1](lowercase )
A_ : Dict = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase )
if not return_dict:
A_ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
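# Illustrative end-to-end use (checkpoint name as in the docstrings above;
# `image` is a placeholder PIL image):
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(**processor(images=image, return_tensors="tf")).logits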
| 558
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowercase__( snake_case__ ):
'''simple docstring'''
snake_case__ = """realm"""
def __init__( self , __SCREAMING_SNAKE_CASE=3_05_22 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=5_12 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-12 , __SCREAMING_SNAKE_CASE=2_56 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=1E-3 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=3_20 , __SCREAMING_SNAKE_CASE=13_35_37_18 , __SCREAMING_SNAKE_CASE=50_00 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
# Common config
UpperCamelCase__ : Dict =vocab_size
UpperCamelCase__ : Dict =max_position_embeddings
UpperCamelCase__ : Optional[int] =hidden_size
UpperCamelCase__ : Optional[Any] =retriever_proj_size
UpperCamelCase__ : Dict =num_hidden_layers
UpperCamelCase__ : int =num_attention_heads
UpperCamelCase__ : Union[str, Any] =num_candidates
UpperCamelCase__ : int =intermediate_size
UpperCamelCase__ : Union[str, Any] =hidden_act
UpperCamelCase__ : Tuple =hidden_dropout_prob
UpperCamelCase__ : List[str] =attention_probs_dropout_prob
UpperCamelCase__ : Tuple =initializer_range
UpperCamelCase__ : str =type_vocab_size
UpperCamelCase__ : str =layer_norm_eps
# Reader config
UpperCamelCase__ : Union[str, Any] =span_hidden_size
UpperCamelCase__ : Optional[int] =max_span_width
UpperCamelCase__ : Any =reader_layer_norm_eps
UpperCamelCase__ : Dict =reader_beam_size
UpperCamelCase__ : Tuple =reader_seq_len
# Retrieval config
UpperCamelCase__ : Tuple =num_block_records
UpperCamelCase__ : Optional[Any] =searcher_beam_size
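# Illustrative usage (the upstream class name is RealmConfig; the overrides
# here are hypothetical):
#   config = RealmConfig(num_candidates=4, reader_beam_size=3)
#   # per the __init__ defaults above: hidden_size=768, retriever_proj_size=128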
| 716
|
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : List[Any] =tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCamelCase__ : List[Any] =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
UpperCamelCase__ : Optional[Any] =tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
UpperCamelCase__ : int =tf_top_k_top_p_filtering(__SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
UpperCamelCase__ : List[str] =output[output != -float("inf")]
UpperCamelCase__ : str =tf.cast(
tf.where(tf.not_equal(__SCREAMING_SNAKE_CASE , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-12)
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
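        # What the assertions above verify (a sketch): tf_top_k_top_p_filtering
        # keeps, per row, the highest logits until their cumulative softmax mass
        # exceeds top_p (capped at top_k and never fewer than min_tokens_to_keep)
        # and sets every other logit to -inf, which the test recovers via tf.where.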
@require_tf
class lowercase__( unittest.TestCase , snake_case__ ):
'''simple docstring'''
if is_tf_available():
snake_case__ = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def UpperCAmelCase ( self) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
UpperCamelCase__ : Tuple =2
UpperCamelCase__ : Any =2
class lowercase__( tf.Module ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE) -> Optional[int]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self).__init__()
UpperCamelCase__ : Any =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> str:
"""simple docstring"""
UpperCamelCase__ : int =self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
UpperCamelCase__ : str =[[2, 0], [1_02, 1_03]]
UpperCamelCase__ : int =[[1, 0], [1, 1]]
UpperCamelCase__ : Dict =DummyModel(model=__SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={"serving_default": dummy_model.serving})
UpperCamelCase__ : Optional[int] =tf.saved_model.load(__SCREAMING_SNAKE_CASE).signatures["serving_default"]
for batch_size in range(1 , len(__SCREAMING_SNAKE_CASE) + 1):
UpperCamelCase__ : List[str] ={
"input_ids": tf.constant(dummy_input_ids[:batch_size]),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
}
UpperCamelCase__ : str =serving_func(**__SCREAMING_SNAKE_CASE)["sequences"]
UpperCamelCase__ : List[str] =test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE)
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_tokenizer_in_keras_model(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose signature accepts an extra "foo" argument: the unexpected kwarg must be filtered
        # out before reaching the encoder, so the generated output is unchanged.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        # Encoder signature filtering only kicks in when the encoder does not accept wildcard kwargs, so with an
        # encoder that takes **kwargs the "foo" input reaches the encoder and generate() must raise.
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
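

# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original test file: the SavedModel export
# pattern exercised by the tests above, reduced to a standalone helper. The
# checkpoint id is the tiny test model used throughout this file; the fixed
# input length of 4 and the export directory are illustrative assumptions.
def export_generate_as_saved_model(export_dir="./tf_generate_export"):
    import tensorflow as tf
    from transformers import TFAutoModelForCausalLM

    model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

    class GenerateModule(tf.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        @tf.function(
            input_signature=(tf.TensorSpec((None, 4), tf.int32, name="input_ids"),),
            jit_compile=True,
        )
        def serving(self, input_ids):
            outputs = self.model.generate(input_ids=input_ids, max_new_tokens=2, return_dict_in_generate=True)
            return {"sequences": outputs["sequences"]}

    module = GenerateModule(model)
    tf.saved_model.save(module, export_dir, signatures={"serving_default": module.serving})
    return tf.saved_model.load(export_dir).signatures["serving_default"]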
| 582
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolution + batch norm + ReLU block, as used throughout the UperNet heads."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[int, Tuple[int, int]], padding: Union[int, Tuple[int, int], str] = 0, bias: bool = False, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding, bias=bias, dilation=dilation)
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM): pools the deepest feature map at several scales, projects each pooled map back to
    `channels` channels with a 1x1 conv block, and upsamples everything to the input resolution.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head (UPerHead) based on an FPN plus a PSP module: the deepest feature map goes
    through pyramid pooling, every backbone level is projected to a common channel size, fused top-down, and
    classified.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels, self.channels, kernel_size=3, padding=1
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels, self.channels, kernel_size=3, padding=1
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
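

# Shape sketch (illustrative assumption, not from the original file): with a ConvNeXt-tiny backbone and a
# 224x224 input, the four feature maps are roughly (B, 96, 56, 56), (B, 192, 28, 28), (B, 384, 14, 14) and
# (B, 768, 7, 7). UperNetHead projects each level to `config.hidden_size` channels, fuses them top-down, and
# its classifier returns (B, num_labels, 56, 56) logits that the model below upsamples to the input size.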
class UperNetFCNHead(nn.Module):
    """
    Fully convolutional head used as the auxiliary head of UperNet.
    """

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
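

# Hedged usage sketch, not part of the original file. Running this module directly would require an installed
# `transformers`; the config values below are illustrative assumptions and the weights are randomly initialized.
if __name__ == "__main__":
    from transformers import UperNetConfig, UperNetForSemanticSegmentation

    config = UperNetConfig(num_labels=19)  # assumption: 19 classes, Cityscapes-style
    model = UperNetForSemanticSegmentation(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        outputs = model(pixel_values)
    print(outputs.logits.shape)  # torch.Size([1, 19, 224, 224])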
| 81
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument("--num_epochs", type=int, default=2, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
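

# Minimal sketch of the save/load pattern the script relies on (illustrative addition, not part of the original
# script; the directory name and the tiny model are placeholders). `save_state` writes model, optimizer,
# scheduler and RNG states, and `load_state` restores them in place, which is what makes the resume asserts
# above meaningful.
def _checkpoint_roundtrip_sketch(checkpoint_dir="./ckpt_sketch"):
    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = AdamW(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state(checkpoint_dir)
    accelerator.load_state(checkpoint_dir)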
| 364
| 0
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """
    Uses exchange sort to sort a list of numbers in place.
    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
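

# Quick property check for the quadratic exchange sort above (illustrative addition, not part of the original
# file): on random inputs the result must match Python's built-in `sorted`.
def _check_exchange_sort(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert exchange_sort(list(data)) == sorted(data)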
| 319
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
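

# Hedged usage sketch, not part of the original test file: the model id below is the one used by the slow
# integration test above; downloading it requires network access.
def _gpt_sw3_demo():
    from transformers import GPTSw3Tokenizer

    tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    ids = tokenizer("Det är inget fel på Mr. Cool")["input_ids"]
    print(ids)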
| 319
| 1
|
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # The target attribute paths follow the T5-style layout of SpectrogramNotesEncoder in diffusers.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # The target attribute paths follow the layout of T5FilmDecoder in diffusers (self-attention, cross-attention
    # and FiLM-conditioned feed-forward layers).
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder,
        scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
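

# Assumed invocation of the converter above (the script file name and paths are illustrative; the checkpoint
# layout must match the music_spectrogram_diffusion release):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion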
| 186
|
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor, assuming
        `do_resize` is `True` with a scalar shortest edge and a `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
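

# Hedged usage sketch, not part of the original test file: preprocess a single random image with the processor
# under test. The size value mirrors the tester defaults above; the exact output height/width depend on the
# resize and pad settings.
def _bridgetower_preprocess_sketch():
    processor = BridgeTowerImageProcessor(size={"shortest_edge": 288})
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # (1, 3, height, width)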
| 186
| 1
|
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to ConditionalDetrImageProcessor, assuming
        `do_resize` is `True` with a scalar shortest edge.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Any = json.loads(f.read() )
UpperCamelCase__ :Tuple = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :Dict = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :int = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :int = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Optional[int] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :Optional[int] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :str = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
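# A minimal usage sketch of the processor exercised above. The model id comes
# from the slow test itself; everything else here is illustrative rather than
# part of the test suite:
#
#     from PIL import Image
#     from transformers import ConditionalDetrImageProcessor
#
#     processor = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
#     image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
#     encoding = processor(images=image, return_tensors='pt')
#     encoding['pixel_values'].shape  # (1, 3, H, W) after shortest-edge resize + normalization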
| 280
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the histogram of the shot counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 280
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    """Sums the values of all nodes in a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
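# Minimal usage sketch (class names follow the definitions above; the numbers
# are illustrative):
#
#     root = Node(10)
#     root.left = Node(5)
#     root.right = Node(-3)
#     print(next(iter(BinaryTreeNodeSum(root))))  # -> 12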
| 697
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Checks whether `cp` is the code point of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def is_chinese(word: str) -> int:
    """Returns 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collects the multi-character, fully-Chinese tokens from a segmentation."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefixes '##' to BERT tokens that continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
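# Worked example (tokens are illustrative): if the LTP segmenter produced the
# whole word '中国' and BERT tokenized it as ['中', '国'], the loop above
# rewrites the tokens to ['中', '##国'], marking '国' as the continuation of a
# whole word for whole-word masking.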
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """Builds, per line, the positions of '##'-continuation Chinese subwords."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # i.e. that continue a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args) -> None:
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
args = parser.parse_args()
main(args)
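# Example invocation (script name inferred from the argparse description; the
# paths are the defaults declared above):
#
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert \
#         --save_path ./resources/ref.txt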
| 697
| 1
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Returns True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of the pattern and of the first substring of the text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Update the https://en.wikipedia.org/wiki/Rolling_hash in O(1)
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Exercises rabin_karp on matching and non-matching inputs."""
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
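# Rolling-hash sketch (base 256 and modulus 1_000_003 as defined above): for a
# window of length 2 sliding over 'abc',
#
#     h('ab') = (ord('a') * 256 + ord('b')) % 1_000_003
#     h('bc') = ((h('ab') - ord('a') * 256) * 256 + ord('c')) % 1_000_003
#
# so each shift updates the hash in O(1) instead of rehashing the whole window.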
| 718
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A_ ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self : Dict ):
__a = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
__a = AutoTokenizer.from_pretrained("xlm-roberta-base" )
__a = "The dog is cute and lives in the garden house"
__a = jnp.array([tokenizer.encode(__SCREAMING_SNAKE_CASE )] )
__a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
__a = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
__a = model(__SCREAMING_SNAKE_CASE )["last_hidden_state"]
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 525
| 0
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: Dict = ''
SCREAMING_SNAKE_CASE: Dict = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ):
super().__init__(self , **lowerCamelCase__ )
lowerCAmelCase_: Any = repo_info
lowerCAmelCase_: Any = token
lowerCAmelCase_: int = None
def _a ( self ):
if self.dir_cache is None:
lowerCAmelCase_: int = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCAmelCase_: Optional[int] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(lowerCamelCase__ ): {"name": str(lowerCamelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ = "rb" , **lowerCamelCase__ , ):
if not isinstance(self.repo_info , lowerCamelCase__ ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
lowerCAmelCase_: Union[str, Any] = hf_hub_url(self.repo_info.id , lowerCamelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase__ , mode=lowerCamelCase__ , headers=get_authentication_headers_for_url(lowerCamelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def _a ( self , lowerCamelCase__ , **lowerCamelCase__ ):
self._get_dirs()
lowerCAmelCase_: int = self._strip_protocol(lowerCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase__ )
def _a ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ):
self._get_dirs()
lowerCAmelCase_: int = PurePosixPath(path.strip("/" ) )
lowerCAmelCase_: Optional[int] = {}
for p, f in self.dir_cache.items():
lowerCAmelCase_: Optional[Any] = PurePosixPath(p.strip("/" ) )
lowerCAmelCase_: Union[str, Any] = p.parent
if root == path:
lowerCAmelCase_: List[Any] = f
lowerCAmelCase_: Any = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 613
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE: torch.FloatTensor
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase__ = 65_536 , lowerCamelCase__ = None , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 0 , lowerCamelCase__ = "fourier" , lowerCamelCase__ = True , lowerCamelCase__ = False , lowerCamelCase__ = 0.0 , lowerCamelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCamelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCamelCase__ = "UNetMidBlock1D" , lowerCamelCase__ = None , lowerCamelCase__ = (32, 32, 64) , lowerCamelCase__ = None , lowerCamelCase__ = 8 , lowerCamelCase__ = 1 , lowerCamelCase__ = False , ):
super().__init__()
lowerCAmelCase_: Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
lowerCAmelCase_: Dict = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
lowerCAmelCase_: List[str] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCAmelCase_: Tuple = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
lowerCAmelCase_: Dict = block_out_channels[0]
if use_timestep_embedding:
lowerCAmelCase_: Tuple = block_out_channels[0] * 4
lowerCAmelCase_: Any = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
lowerCAmelCase_: str = nn.ModuleList([] )
lowerCAmelCase_: Dict = None
lowerCAmelCase_: Optional[Any] = nn.ModuleList([] )
lowerCAmelCase_: int = None
# down
lowerCAmelCase_: List[str] = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
lowerCAmelCase_: Optional[int] = output_channel
lowerCAmelCase_: Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCAmelCase_: List[str] = i == len(lowerCamelCase__ ) - 1
lowerCAmelCase_: List[str] = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
lowerCAmelCase_: Optional[int] = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
lowerCAmelCase_: Dict = list(reversed(lowerCamelCase__ ) )
lowerCAmelCase_: Any = reversed_block_out_channels[0]
if out_block_type is None:
lowerCAmelCase_: str = out_channels
else:
lowerCAmelCase_: Any = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
lowerCAmelCase_: Dict = output_channel
lowerCAmelCase_: int = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
lowerCAmelCase_: Optional[int] = i == len(lowerCamelCase__ ) - 1
lowerCAmelCase_: Union[str, Any] = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
lowerCAmelCase_: str = output_channel
# out
lowerCAmelCase_: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCAmelCase_: int = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
def _a ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = True , ):
lowerCAmelCase_: Any = timestep
if not torch.is_tensor(lowerCamelCase__ ):
lowerCAmelCase_: Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
lowerCAmelCase_: List[Any] = timesteps[None].to(sample.device )
lowerCAmelCase_: Union[str, Any] = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
lowerCAmelCase_: Any = self.time_mlp(lowerCamelCase__ )
else:
lowerCAmelCase_: Any = timestep_embed[..., None]
lowerCAmelCase_: str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCAmelCase_: Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCAmelCase_: Dict = ()
for downsample_block in self.down_blocks:
lowerCAmelCase_ , lowerCAmelCase_: Optional[int] = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCAmelCase_: int = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCAmelCase_: Any = down_block_res_samples[-1:]
lowerCAmelCase_: str = down_block_res_samples[:-1]
lowerCAmelCase_: List[str] = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
lowerCAmelCase_: Any = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCamelCase__ )
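# Shape sketch of a forward pass (values illustrative, default config names
# used): `sample` is (batch, in_channels, sample_size) and `timestep` is a
# scalar; with the "fourier" embedding, time_proj yields a
# (batch, 2 * block_out_channels[0]) vector that is repeated along the length
# axis before entering the down/mid/up blocks, and the output keeps the
# (batch, out_channels, sample_size) layout.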
| 613
| 1
|
"""simple docstring"""
def multiplicative_persistence(num: int) -> int:
    """Counts how many times the digits must be multiplied until one digit remains."""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Counts how many times the digits must be summed until one digit remains."""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
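# Worked examples (checkable by hand):
#   multiplicative_persistence(39)  -> 3   (39 -> 27 -> 14 -> 4)
#   additive_persistence(199)       -> 3   (199 -> 19 -> 10 -> 1)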
| 703
|
"""simple docstring"""
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights, model):
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = ly_weight['attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
UpperCAmelCase__ = ly_weight['attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
UpperCAmelCase__ = weights[f'''layers_{lyr_num}''']
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase__ = ly_weight['self_attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = ly_weight['MultiHeadDotProductAttention_0']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    ta_checkpoint = jax.tree_util.tree_map(onp.array, ta_checkpoint)
UpperCAmelCase__ = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
UpperCAmelCase__ = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
UpperCAmelCase__ = inference.parse_training_gin_file(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path , lowerCamelCase )
UpperCAmelCase__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
UpperCAmelCase__ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
UpperCAmelCase__ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
UpperCAmelCase__ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
UpperCAmelCase__ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowerCamelCase )
UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowerCamelCase )
UpperCAmelCase__ = load_decoder(ta_checkpoint['target']['decoder'] , lowerCamelCase )
UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
UpperCAmelCase__ = SpectrogramDiffusionPipeline(
notes_encoder=lowerCamelCase , continuous_encoder=lowerCamelCase , decoder=lowerCamelCase , scheduler=lowerCamelCase , melgan=lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
args = parser.parse_args()
main(args)
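# Example invocation (the script name is inferred, not confirmed by this file;
# the checkpoint path follows the default pattern declared above):
#
#     python convert_music_spectrogram_to_diffusers.py \
#         --checkpoint_path base_with_context/checkpoint_500000 \
#         --output_path ./spectrogram_diffusion_pipeline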
| 632
| 0
|
def selection_sort(collection: list) -> list:
    """Sorts `collection` in place with selection sort and returns it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
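# Quick sanity check (illustrative, separate from the CLI flow above):
#   assert selection_sort([3, 1, 2]) == [1, 2, 3]
# Selection sort performs O(n^2) comparisons but at most n - 1 swaps, which
# can matter when element writes are expensive.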
| 326
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
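# Example invocation (file names are illustrative):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin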
| 326
| 1
|
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Returns the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
args = parser.parse_args()
zeller(args.date_input)
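# Worked example for '01-31-2010' (a Sunday): m = 1 <= 2, so y = 2009 and
# m = 13; then c = 20, k = 9, t = int(2.6 * 13 - 5.39) = 28, u = 5, v = 2,
# x = 31 + 9 = 40, z = 75, w = 75 - 40 = 35, and f = 35 % 7 = 0, which maps
# to days['0'] == 'Sunday'.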
| 705
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
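# Worked example at a single point x (membership values illustrative): with
# muA(x) = 0.6 and muB(x) = 0.7,
#   union          = max(0.6, 0.7)          = 0.7
#   intersection   = min(0.6, 0.7)          = 0.6
#   algebraic sum  = 0.6 + 0.7 - 0.6 * 0.7  = 0.88
#   bounded sum    = min(1, 0.6 + 0.7)      = 1.0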
| 302
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = XGLMConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : int , a_ : List[Any] , a_ : Dict=14 , a_ : int=7 , a_ : List[Any]=True , a_ : Optional[int]=True , a_ : Optional[int]=True , a_ : int=99 , a_ : Optional[int]=32 , a_ : Tuple=2 , a_ : Any=4 , a_ : Union[str, Any]=37 , a_ : Any="gelu" , a_ : str=0.1 , a_ : Any=0.1 , a_ : str=512 , a_ : List[Any]=0.02 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = d_model
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = ffn_dim
__snake_case = activation_function
__snake_case = activation_dropout
__snake_case = attention_dropout
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = None
__snake_case = 0
__snake_case = 2
__snake_case = 1
def A ( self : Union[str, Any] ):
"""simple docstring"""
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = self.get_config()
__snake_case = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A ( self : str ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a_ , )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFXGLMForCausalLM,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = TFXGLMModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , n_embd=37 )
def A ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def A ( self : Tuple ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFXGLMModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def A ( self : Dict ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : List[Any] , a_ : Dict=True ):
"""simple docstring"""
__snake_case = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__snake_case = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__snake_case = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
__snake_case = model.generate(a_ , do_sample=a_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , a_ )
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__snake_case = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
__snake_case = tokenizer("Today is a nice day and" , return_tensors="tf" )
__snake_case = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
__snake_case = model.generate(a_ , do_sample=a_ , seed=[7, 0] )
__snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ )
__snake_case = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(a_ , a_ )
@slow
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__snake_case = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__snake_case = "left"
# use different length sentences to test batching
__snake_case = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__snake_case = tokenizer(a_ , return_tensors="tf" , padding=a_ )
__snake_case = inputs["input_ids"]
__snake_case = model.generate(input_ids=a_ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
__snake_case = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
__snake_case = model.generate(input_ids=a_ , max_new_tokens=12 )
__snake_case = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
__snake_case = model.generate(input_ids=a_ , max_new_tokens=12 )
__snake_case = tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
__snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ )
__snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ )
__snake_case = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(a_ , a_ )
self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] )
| 69
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
__magic_name__ = StableDiffusionPanoramaPipeline
__magic_name__ = TEXT_TO_IMAGE_PARAMS
__magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
snake_case : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
snake_case : Tuple = DDIMScheduler()
torch.manual_seed(0 )
snake_case : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
snake_case : int = CLIPTextModel(_lowercase )
snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowercase ( self : List[str] , _lowercase : int , _lowercase : Dict=0 ) -> Optional[int]:
snake_case : Union[str, Any] = torch.manual_seed(_lowercase )
snake_case : str = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __lowercase ( self : str ) -> List[str]:
snake_case : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.get_dummy_components()
snake_case : Any = StableDiffusionPanoramaPipeline(**_lowercase )
snake_case : List[Any] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Optional[Any] = self.get_dummy_inputs(_lowercase )
snake_case : Union[str, Any] = sd_pipe(**_lowercase ).images
snake_case : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : List[Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : int ) -> Union[str, Any]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowercase ( self : Tuple ) -> Tuple:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def __lowercase ( self : Any ) -> List[Any]:
snake_case : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : Union[str, Any] = self.get_dummy_components()
snake_case : Tuple = StableDiffusionPanoramaPipeline(**_lowercase )
snake_case : Tuple = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case : List[str] = self.get_dummy_inputs(_lowercase )
snake_case : int = "french fries"
snake_case : Union[str, Any] = sd_pipe(**_lowercase , negative_prompt=_lowercase )
snake_case : str = output.images
snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Union[str, Any] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : str ) -> Any:
snake_case : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : List[Any] = self.get_dummy_components()
snake_case : List[Any] = StableDiffusionPanoramaPipeline(**_lowercase )
snake_case : int = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Tuple = self.get_dummy_inputs(_lowercase )
snake_case : str = sd_pipe(**_lowercase , view_batch_size=2 )
snake_case : Optional[Any] = output.images
snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : str = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : int ) -> Optional[Any]:
snake_case : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : List[str] = self.get_dummy_components()
snake_case : Any = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" )
snake_case : List[str] = StableDiffusionPanoramaPipeline(**_lowercase )
snake_case : List[str] = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case : List[Any] = self.get_dummy_inputs(_lowercase )
snake_case : Optional[Any] = sd_pipe(**_lowercase ).images
snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Tuple = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Tuple ) -> Union[str, Any]:
snake_case : str = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case : str = self.get_dummy_components()
snake_case : Optional[int] = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=_lowercase )
snake_case : Dict = StableDiffusionPanoramaPipeline(**_lowercase )
snake_case : Dict = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Dict = self.get_dummy_inputs(_lowercase )
snake_case : Optional[int] = sd_pipe(**_lowercase ).images
snake_case : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case : Optional[int] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
def __lowercase ( self : Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict , _lowercase : Any=0 ) -> Optional[Any]:
snake_case : Any = torch.manual_seed(_lowercase )
snake_case : Any = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __lowercase ( self : Optional[Any] ) -> Union[str, Any]:
snake_case : List[Any] = "stabilityai/stable-diffusion-2-base"
snake_case : Dict = DDIMScheduler.from_pretrained(_lowercase , subfolder="scheduler" )
snake_case : Any = StableDiffusionPanoramaPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case : str = self.get_inputs()
snake_case : List[Any] = pipe(**_lowercase ).images
snake_case : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
snake_case : List[str] = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __lowercase ( self : Tuple ) -> List[str]:
snake_case : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=_lowercase )
snake_case : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
snake_case : Dict = self.get_inputs()
snake_case : int = pipe(**_lowercase ).images
snake_case : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
snake_case : List[str] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
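    # number_of_steps == 3 matches num_inference_steps=3 in get_inputs(); with
    # callback_steps=1 the callback fires once per denoising step.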
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
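# Illustrative sketch (not from the test file above): a minimal standalone driver
# for StableDiffusionPanoramaPipeline, assembled from the same calls the slow
# tests exercise. The checkpoint id and all arguments come from those tests, and
# the "cuda" device is an assumption; treat this as a hedged example, not
# canonical diffusers usage.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()

generator = torch.manual_seed(0)
image = pipe(
    "a photo of the dolomites",
    generator=generator,
    num_inference_steps=3,
    guidance_scale=7.5,
    output_type="numpy",
).images[0]
print(image.shape)  # (512, 2048, 3): the pipeline's default panorama resolution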
| 449 | 0 |
"""Breadth-first search: build a BFS tree and reconstruct shortest paths in an unweighted graph."""
from __future__ import annotations
graph: dict[str, list[str]] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """Graph implemented as a dictionary of adjacency lists. The source
    vertex has to be defined upon initialization."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
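    # breadth_first_search touches each vertex and edge once, so it runs in
    # O(V + E); note that list.pop(0) is O(V) per dequeue, so collections.deque
    # would be the tighter choice in practice.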
    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
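# Expected output for the graph above (BFS tree rooted at 'G'):
#   G->C->A->B->D
#   G
# The final call raises ValueError, since 'Foo' is not a vertex reachable from 'G'.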
| 174 |
"""Fill-mask pipeline for masked language modeling predictions."""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a masked-LM head."""
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
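    # With a single mask token the caller gets a flat list of proposals; with
    # several masks, one list per mask position (hence the `single_mask` unwrap).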
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
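    # Per the Pipeline contract, the tuple above is (preprocess_params,
    # forward_params, postprocess_params); everything this pipeline sanitizes is
    # routed to postprocess().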
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
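# Illustrative sketch (not part of the pipeline source above): FillMaskPipeline
# is normally reached through the `pipeline` factory. The checkpoint id below is
# an assumption chosen for the example; any masked-language-model checkpoint
# with a [MASK]-style token would work the same way.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
# each proposal carries the "score", "token", "token_str" and "sequence" keys
# assembled in postprocess() above
print(unmasker("Paris is the [MASK] of France.", top_k=3))
# `targets` restricts scoring to the given tokens, via get_target_ids() above
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"]))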
| 174 | 1 |