def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod with O(log n) recursive squaring."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using the plain exponentiation operator, O(log(p)):
print((a / b) % p == (a * b ** (p - 2)) % p)
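# --- Added usage sketch (not part of the original file) ---
# By Fermat's little theorem, binary_exponentiation(b, p - 2, p) is the modular
# inverse of b for prime p, so "division" mod p needs no floats. The helper name
# `mod_division` is our own illustration.
def mod_division(numerator: int, denominator: int, p: int) -> int:
    return (numerator * binary_exponentiation(denominator, p - 2, p)) % p


assert mod_division(8, 4, 7) == 2  # 8 / 4 == 2 (mod 7)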
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the MaskFormer-Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
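# --- Added sanity check (our addition; assumes the package context above, since the
# imports are relative) ---
# With the default embed_dim=96 and four stages, the derived channel dimension
# after the last stage is 96 * 2 ** 3 = 768.
config = MaskFormerSwinConfig()
assert config.hidden_size == 768
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]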
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the number of
    reduced proper fractions with denominator <= limit (Project Euler 72)."""
    # sieve seed: phi(i) starts at i - 1 and is corrected for every prime factor
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
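# --- Added worked example (not part of the original file) ---
# For limit = 8 there are phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21
# reduced proper fractions, matching the Project Euler 72 statement.
assert solution(8) == 21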
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73_682
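# --- Added worked example (not part of the original file) ---
# 5 pence can be made in 4 ways: (5), (2+2+1), (2+1+1+1), (1+1+1+1+1).
assert solution(5) == 4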
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def solution(power: int = 1000) -> int:
    """Sum of the decimal digits of 2 ** power (Project Euler 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
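# --- Added worked example (not part of the original file) ---
# 2 ** 15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26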
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. (2022), Flax version."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load the base longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A_ ( _snake_case ):
"""simple docstring"""
lowercase : List[Any] = 'time_series_transformer'
lowercase : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "student_t" , __UpperCAmelCase = "nll" , __UpperCAmelCase = 1 , __UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7] , __UpperCAmelCase = "mean" , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = 2 , __UpperCAmelCase = True , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 64 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 1_00 , __UpperCAmelCase = 0.02 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> List[str]:
a : List[str] = prediction_length
a : Union[str, Any] = context_length or prediction_length
a : str = distribution_output
a : Optional[int] = loss
a : Optional[int] = input_size
a : List[str] = num_time_features
a : int = lags_sequence
a : str = scaling
a : Union[str, Any] = num_dynamic_real_features
a : List[Any] = num_static_real_features
a : Optional[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
a : str = cardinality
else:
a : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
a : List[str] = embedding_dimension
else:
a : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
a : List[Any] = num_parallel_samples
# Transformer architecture configuration
a : List[Any] = input_size * len(__UpperCAmelCase ) + self._number_of_features
a : Optional[int] = d_model
a : List[Any] = encoder_attention_heads
a : List[str] = decoder_attention_heads
a : List[Any] = encoder_ffn_dim
a : List[str] = decoder_ffn_dim
a : List[str] = encoder_layers
a : Union[str, Any] = decoder_layers
a : Union[str, Any] = dropout
a : Union[str, Any] = attention_dropout
a : List[Any] = activation_dropout
a : Tuple = encoder_layerdrop
a : Tuple = decoder_layerdrop
a : Any = activation_function
a : List[Any] = init_std
a : Dict = use_cache
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase_ ( self ) -> List[Any]:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
"""simple docstring"""
import os
def A_ ( ) -> Dict:
a : List[str] = os.path.join(os.path.dirname(UpperCAmelCase__ ) , 'num.txt' )
with open(UpperCAmelCase__ ) as file_hand:
return str(sum(int(UpperCAmelCase__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=10 , lowercase__=3 , lowercase__=2 , lowercase__=2 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.02 , lowercase__="divided_space_time" , lowercase__=None , ):
snake_case_ : Tuple = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Any = image_size
snake_case_ : Dict = num_channels
snake_case_ : Optional[int] = patch_size
snake_case_ : Dict = num_frames
snake_case_ : Dict = is_training
snake_case_ : Union[str, Any] = use_labels
snake_case_ : Dict = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : Optional[int] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Optional[int] = attention_type
snake_case_ : Tuple = initializer_range
snake_case_ : Union[str, Any] = scope
snake_case_ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
snake_case_ : Optional[int] = (image_size // patch_size) ** 2
snake_case_ : Optional[int] = (num_frames) * self.num_patches_per_frame + 1
def __UpperCamelCase (self ):
snake_case_ : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case_ : int = None
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase (self ):
snake_case_ : Tuple = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
snake_case_ : Tuple = self.num_labels
return config
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Tuple = TimesformerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : List[Any] = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Optional[Any] = TimesformerForVideoClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
snake_case_ : Tuple = model(lowercase__ )
# verify the logits shape
snake_case_ : str = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Tuple = config_and_inputs
snake_case_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
_A : List[str] = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
_A : List[str] = False
_A : Optional[int] = False
_A : Tuple = False
_A : Union[str, Any] = False
def __UpperCamelCase (self ):
snake_case_ : Tuple = TimesformerModelTester(self )
snake_case_ : List[Any] = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=False ):
snake_case_ : Any = copy.deepcopy(lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
snake_case_ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __UpperCamelCase (self ):
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(lowercase__ )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Any = [*signature.parameters.keys()]
snake_case_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase__ )
@slow
def __UpperCamelCase (self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : List[Any] = TimesformerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def __UpperCamelCase (self ):
if not self.has_attentions:
pass
else:
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = True
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = self.model_tester.seq_length
snake_case_ : Dict = self.model_tester.num_frames
snake_case_ : Union[str, Any] = True
snake_case_ : int = False
snake_case_ : List[str] = True
snake_case_ : Optional[Any] = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
snake_case_ : Optional[int] = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : List[Any] = True
snake_case_ : Tuple = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
snake_case_ : Any = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
snake_case_ : Dict = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
snake_case_ : Any = len(lowercase__ )
# Check attention is always last and order is fine
snake_case_ : Optional[Any] = True
snake_case_ : Any = True
snake_case_ : Optional[Any] = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + 1 , len(lowercase__ ) )
snake_case_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __UpperCamelCase (self ):
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Optional[Any] = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
snake_case_ : Optional[Any] = outputs.hidden_states
snake_case_ : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase__ ) , lowercase__ )
snake_case_ : Tuple = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : str = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : List[str] = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
snake_case_ : Dict = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_vision
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def __UpperCamelCase (self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase (self ):
snake_case_ : List[Any] = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase__ )
snake_case_ : Optional[int] = self.default_image_processor
snake_case_ : str = prepare_video()
snake_case_ : int = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase__ )
# forward pass
with torch.no_grad():
snake_case_ : List[Any] = model(**lowercase__ )
# verify the logits
snake_case_ : str = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowercase__ )
snake_case_ : int = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if index == len(SCREAMING_SNAKE_CASE__ ):
return True
# Recursive Step
for i in range(SCREAMING_SNAKE_CASE__ ):
if valid_coloring(graph[index] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# Color current vertex
snake_case_ : Dict = i
# Validate coloring
if util_color(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 ):
return True
# Backtrack
snake_case_ : List[Any] = -1
return False
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : int = [-1] * len(SCREAMING_SNAKE_CASE__ )
if util_color(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 ):
return colored_vertices
return []
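# --- Added usage sketch (not part of the original file) ---
# A triangle (K3, given as an adjacency matrix) is not 2-colorable but is
# 3-colorable; the backtracker returns [] in the first case and a valid
# assignment in the second.
K3 = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(K3, 2) == []
assert color(K3, 3) == [0, 1, 2]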
import math


def prime_sieve(n: int) -> list:
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers not exceeding `limit` (Project Euler 234):
    numbers divisible by exactly one of lps(n) and ups(n), the primes just
    below and above sqrt(n)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
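# --- Added quick check of the sieve helper (not part of the original file) ---
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]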
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
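# --- Added illustrative instantiation (our addition; assumes the package context
# above, since the imports are relative) ---
# Overriding one field leaves the remaining defaults intact.
config = RoCBertConfig(vocab_size=1_000)
assert config.vocab_size == 1_000
assert config.hidden_size == 768 and config.shape_vocab_size == 24_858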
"""simple docstring"""
def a_ ( lowercase__ :int = 1000 ):
__lowerCamelCase = -1
__lowerCamelCase = 0
for a in range(1, n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__lowerCamelCase = (n * n - 2 * a * n) // (2 * n - 2 * a)
__lowerCamelCase = n - a - b
if c * c == (a * a + b * b):
__lowerCamelCase = a * b * c
if candidate >= product:
__lowerCamelCase = candidate
return product
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from collections import defaultdict
def a_ ( lowercase__ :int ):
__lowerCamelCase = 1
__lowerCamelCase = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowercase__ )
if ret % 2 == 0:
cuts.append(lowercase__ )
return ret
def a_ ( ):
dfs(1 )
if __name__ == "__main__":
__magic_name__ , __magic_name__ : Tuple = 1_0, 9
__magic_name__ : Tuple = defaultdict(list)
__magic_name__ : dict[int, bool] = {}
__magic_name__ : list[int] = []
__magic_name__ : List[str] = 0
__magic_name__ : Tuple = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1
    in O(n^2) time."""
    # factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
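# --- Added worked examples (not part of the original file) ---
# With n = 3, the permutations of [0, 1, 2] in lexicographic order run from
# k = 0 (identity) to k = 5 (fully reversed).
assert kth_permutation(0, 3) == [0, 1, 2]
assert kth_permutation(5, 3) == [2, 1, 0]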
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =os.path.dirname(os.path.realpath(__UpperCamelCase ) )
__UpperCamelCase =os.path.join(__UpperCamelCase , '''triangle.txt''' )
with open(__UpperCamelCase ) as f:
__UpperCamelCase =f.readlines()
__UpperCamelCase =[]
for line in triangle:
__UpperCamelCase =[]
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(__UpperCamelCase ) )
a.append(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(len(a[i] ) ):
__UpperCamelCase =a[i - 1][j] if j != len(a[i - 1] ) else 0
__UpperCamelCase =a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCamelCase , __UpperCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = BlenderbotSmallTokenizer
lowercase__ = False
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
super().setUp()
__UpperCamelCase =['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
__UpperCamelCase ={'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
__UpperCamelCase ='''adapt act apte'''
__UpperCamelCase ='''adapt act apte'''
return input_text, output_text
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='''adapt act apte'''
__UpperCamelCase =['''adapt''', '''act''', '''ap@@''', '''te''']
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =[tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__UpperCamelCase =[0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
__UpperCamelCase ='''I am a small frog.'''
__UpperCamelCase =tok([src_text] , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
__UpperCamelCase =tok.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
__UpperCamelCase ='''I am a small frog .'''
__UpperCamelCase ='''.'''
__UpperCamelCase =tok(UpperCamelCase__ )['''input_ids''']
__UpperCamelCase =tok(UpperCamelCase__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : NestedDataStructureLike[PathLike] , SCREAMING_SNAKE_CASE__ : Optional[NamedSplit] = None , SCREAMING_SNAKE_CASE__ : Optional[Features] = None , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : int , ) -> Any:
super().__init__(
SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , num_proc=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = path_or_paths if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else {self.split: path_or_paths}
__lowerCAmelCase = Text(
cache_dir=SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def a ( self : Dict ) -> str:
# Build iterable dataset
if self.streaming:
__lowerCAmelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE__ , download_mode=SCREAMING_SNAKE_CASE__ , verification_mode=SCREAMING_SNAKE_CASE__ , base_path=SCREAMING_SNAKE_CASE__ , num_proc=self.num_proc , )
__lowerCAmelCase = self.builder.as_dataset(
split=self.split , verification_mode=SCREAMING_SNAKE_CASE__ , in_memory=self.keep_in_memory )
return dataset
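# --- Editor's aside (not part of the original module): a hedged usage sketch of the reader above,
# assuming it is importable as `datasets.io.text.TextDatasetReader` (as the test file elsewhere in this
# dump does). Illustration only; the file and cache paths are made up.
#
# from datasets.io.text import TextDatasetReader
# ds = TextDatasetReader("corpus.txt", cache_dir="/tmp/hf_cache").read()
# print(ds.num_rows, ds.column_names)  # one row per line of text; a single "text" column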
| 427
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_text_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = TextDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_text_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
__lowerCAmelCase = TextDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_text_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCAmelCase = text_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
__lowerCAmelCase = [text_path]
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
__lowerCAmelCase = TextDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_text_dataset(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
__lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader({"""train""": text_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_text_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase = tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__lowerCAmelCase = {"""text""": """string"""}
__lowerCAmelCase = features.copy() if features else default_expected_features
__lowerCAmelCase = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCAmelCase = TextDatasetReader({"""train""": text_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_text_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if split:
__lowerCAmelCase = {split: text_path}
else:
__lowerCAmelCase = """train"""
__lowerCAmelCase = {"""train""": text_path, """test""": text_path}
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
__lowerCAmelCase = TextDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_text_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 334
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
@register_to_config
def __init__( self: Optional[int], _lowercase: int = 128, _lowercase: int = 256, _lowercase: float = 2_000.0, _lowercase: int = 768, _lowercase: int = 12, _lowercase: int = 12, _lowercase: int = 64, _lowercase: int = 2048, _lowercase: float = 0.1, ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Sequential(
nn.Linear(_lowercase, d_model * 4, bias=_lowercase), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=_lowercase), nn.SiLU(), )
__lowerCAmelCase = nn.Embedding(_lowercase, _lowercase)
__lowerCAmelCase = False
__lowerCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
__lowerCAmelCase = nn.Dropout(p=_lowercase)
__lowerCAmelCase = nn.ModuleList()
for lyr_num in range(_lowercase):
# FiLM conditional T5 decoder
__lowerCAmelCase = DecoderLayer(d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase)
self.decoders.append(_lowercase)
__lowerCAmelCase = TaLayerNorm(_lowercase)
__lowerCAmelCase = nn.Dropout(p=_lowercase)
__lowerCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
def _lowercase ( self: Optional[int], _lowercase: Any, _lowercase: Dict):
'''simple docstring'''
__lowerCAmelCase = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def _lowercase ( self: Union[str, Any], _lowercase: Optional[int], _lowercase: Optional[Any], _lowercase: Dict):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCAmelCase = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
__lowerCAmelCase = self.conditioning_emb(_lowercase).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCAmelCase = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCAmelCase = torch.broadcast_to(
torch.arange(_lowercase, device=decoder_input_tokens.device), (batch, seq_length), )
__lowerCAmelCase = self.position_encoding(_lowercase)
__lowerCAmelCase = self.continuous_inputs_projection(_lowercase)
inputs += position_encodings
__lowerCAmelCase = self.dropout(_lowercase)
# decoder: No padding present.
__lowerCAmelCase = torch.ones(
decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
# Translate encoding masks to encoder-decoder masks.
__lowerCAmelCase = [(x, self.encoder_decoder_mask(_lowercase, _lowercase)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCAmelCase = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
__lowerCAmelCase = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
for lyr in self.decoders:
__lowerCAmelCase = lyr(
_lowercase, conditioning_emb=_lowercase, encoder_hidden_states=_lowercase, encoder_attention_mask=_lowercase, )[0]
__lowerCAmelCase = self.decoder_norm(_lowercase)
__lowerCAmelCase = self.post_dropout(_lowercase)
__lowerCAmelCase = self.spec_out(_lowercase)
return spec_out
class lowercase_ ( nn.Module ):
def __init__( self: Optional[Any], _lowercase: Optional[int], _lowercase: Any, _lowercase: Optional[int], _lowercase: Optional[Any], _lowercase: Union[str, Any], _lowercase: List[Any]=1e-6):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, dropout_rate=_lowercase))
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, dropout_rate=_lowercase, layer_norm_epsilon=_lowercase, ))
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase, layer_norm_epsilon=_lowercase))
def _lowercase ( self: Union[str, Any], _lowercase: List[Any], _lowercase: Optional[Any]=None, _lowercase: Optional[int]=None, _lowercase: str=None, _lowercase: List[str]=None, _lowercase: Dict=None, ):
'''simple docstring'''
__lowerCAmelCase = self.layer[0](
_lowercase, conditioning_emb=_lowercase, attention_mask=_lowercase, )
if encoder_hidden_states is not None:
__lowerCAmelCase = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
encoder_hidden_states.dtype)
__lowerCAmelCase = self.layer[1](
_lowercase, key_value_states=_lowercase, attention_mask=_lowercase, )
# Apply Film Conditional Feed Forward layer
__lowerCAmelCase = self.layer[-1](_lowercase, _lowercase)
return (hidden_states,)
class lowercase_ ( nn.Module ):
def __init__( self: int, _lowercase: int, _lowercase: Tuple, _lowercase: Union[str, Any], _lowercase: Optional[int]):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = TaLayerNorm(_lowercase)
__lowerCAmelCase = TaFiLMLayer(in_features=d_model * 4, out_features=_lowercase)
__lowerCAmelCase = Attention(query_dim=_lowercase, heads=_lowercase, dim_head=_lowercase, out_bias=_lowercase, scale_qk=_lowercase)
__lowerCAmelCase = nn.Dropout(_lowercase)
def _lowercase ( self: int, _lowercase: Union[str, Any], _lowercase: Union[str, Any]=None, _lowercase: Tuple=None, ):
'''simple docstring'''
__lowerCAmelCase = self.layer_norm(_lowercase)
if conditioning_emb is not None:
__lowerCAmelCase = self.FiLMLayer(_lowercase, _lowercase)
# Self-attention block
__lowerCAmelCase = self.attention(_lowercase)
__lowerCAmelCase = hidden_states + self.dropout(_lowercase)
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self: Optional[int], _lowercase: List[Any], _lowercase: Union[str, Any], _lowercase: List[str], _lowercase: List[Any], _lowercase: Optional[int]):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = Attention(query_dim=_lowercase, heads=_lowercase, dim_head=_lowercase, out_bias=_lowercase, scale_qk=_lowercase)
__lowerCAmelCase = TaLayerNorm(_lowercase, eps=_lowercase)
__lowerCAmelCase = nn.Dropout(_lowercase)
def _lowercase ( self: List[str], _lowercase: Any, _lowercase: Union[str, Any]=None, _lowercase: List[str]=None, ):
'''simple docstring'''
__lowerCAmelCase = self.layer_norm(_lowercase)
__lowerCAmelCase = self.attention(
_lowercase, encoder_hidden_states=_lowercase, attention_mask=attention_mask.squeeze(1), )
__lowerCAmelCase = hidden_states + self.dropout(_lowercase)
return layer_output
class lowercase_ ( nn.Module ):
def __init__( self: Tuple, _lowercase: Union[str, Any], _lowercase: Optional[int], _lowercase: Dict, _lowercase: str):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = TaDenseGatedActDense(d_model=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase)
__lowerCAmelCase = TaFiLMLayer(in_features=d_model * 4, out_features=_lowercase)
__lowerCAmelCase = TaLayerNorm(_lowercase, eps=_lowercase)
__lowerCAmelCase = nn.Dropout(_lowercase)
def _lowercase ( self: Optional[Any], _lowercase: List[Any], _lowercase: Optional[int]=None):
'''simple docstring'''
__lowerCAmelCase = self.layer_norm(_lowercase)
if conditioning_emb is not None:
__lowerCAmelCase = self.film(_lowercase, _lowercase)
__lowerCAmelCase = self.DenseReluDense(_lowercase)
__lowerCAmelCase = hidden_states + self.dropout(_lowercase)
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self: Any, _lowercase: Optional[int], _lowercase: Union[str, Any], _lowercase: List[str]):
'''simple docstring'''
super().__init__()
        self.wi_0 = nn.Linear(_lowercase, _lowercase, bias=False)  # gate projection (d_model -> d_ff)
        self.wi_1 = nn.Linear(_lowercase, _lowercase, bias=False)  # linear projection (d_model -> d_ff)
        self.wo = nn.Linear(_lowercase, _lowercase, bias=False)  # output projection (d_ff -> d_model)
        self.dropout = nn.Dropout(_lowercase)
        self.act = NewGELUActivation()
def _lowercase ( self: str, _lowercase: Union[str, Any]):
'''simple docstring'''
        hidden_gelu = self.act(self.wi_0(_lowercase))  # gated branch: GELU(W0 x)
        hidden_linear = self.wi_1(_lowercase)  # linear branch: W1 x (a distinct projection, not wi_0 again)
        hidden_states = hidden_gelu * hidden_linear  # gated-GELU (GEGLU) combination
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
return hidden_states
class lowercase_ ( nn.Module ):
def __init__( self: Dict, _lowercase: Optional[Any], _lowercase: List[str]=1e-6):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Parameter(torch.ones(_lowercase))
__lowerCAmelCase = eps
def _lowercase ( self: Any, _lowercase: Optional[Any]):
'''simple docstring'''
        __lowerCAmelCase = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
__lowerCAmelCase = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
__lowerCAmelCase = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
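# --- Editor's aside (not part of the original file): `_demo_rms_norm` is a made-up standalone restatement
# of the T5-style norm above -- unlike nn.LayerNorm there is no mean subtraction and no bias, only
# x / sqrt(mean(x^2) + eps), rescaled by a learned weight.
def _demo_rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)  # per-token mean square, in fp32
    return weight * (x * torch.rsqrt(variance + eps)).to(weight.dtype)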
class lowercase_ ( nn.Module ):
def _lowercase ( self: Optional[int], _lowercase: torch.Tensor):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(_lowercase, 3.0))))
class lowercase_ ( nn.Module ):
def __init__( self: List[str], _lowercase: Optional[int], _lowercase: List[str]):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Linear(_lowercase, out_features * 2, bias=_lowercase)
def _lowercase ( self: List[str], _lowercase: Tuple, _lowercase: List[Any]):
'''simple docstring'''
__lowerCAmelCase = self.scale_bias(_lowercase)
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(_lowercase, 2, -1)
__lowerCAmelCase = x * (1 + scale) + shift
return x
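# --- Editor's aside (not part of the original file): FiLM ("feature-wise linear modulation") predicts a
# per-channel (scale, shift) from a conditioning vector and applies x * (1 + scale) + shift, exactly as
# the layer above does. `_demo_film` is a made-up name; `proj` must map the conditioning to 2 * channels.
def _demo_film(x: torch.Tensor, conditioning: torch.Tensor, proj: nn.Linear) -> torch.Tensor:
    scale, shift = torch.chunk(proj(conditioning), 2, dim=-1)
    return x * (1 + scale) + shift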
| 334
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _lowerCAmelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> Any:
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
        # create an imagenet -> id dictionary for easier use
__magic_name__ : List[Any] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
__magic_name__ : Any = int(lowerCAmelCase__ )
__magic_name__ : int = dict(sorted(self.labels.items() ) )
def __magic_name__ ( self , lowerCAmelCase__ ) -> List[int]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : List[str] = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 50 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
__magic_name__ : Dict = len(lowerCAmelCase__ )
__magic_name__ : int = self.transformer.config.sample_size
__magic_name__ : Union[str, Any] = self.transformer.config.in_channels
__magic_name__ : Any = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
__magic_name__ : int = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__magic_name__ : Union[str, Any] = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 )
__magic_name__ : Optional[int] = torch.tensor([10_00] * batch_size , device=self.device )
__magic_name__ : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__magic_name__ : List[str] = latent_model_input[: len(lowerCAmelCase__ ) // 2]
__magic_name__ : Optional[Any] = torch.cat([half, half] , dim=0 )
__magic_name__ : str = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : int = t
if not torch.is_tensor(lowerCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__magic_name__ : Tuple = latent_model_input.device.type == """mps"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                    __magic_name__ : Dict = torch.float32 if is_mps else torch.float64
else:
                    __magic_name__ : Any = torch.int32 if is_mps else torch.int64
__magic_name__ : Union[str, Any] = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__magic_name__ : Any = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__magic_name__ : Union[str, Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__magic_name__ : int = self.transformer(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
__magic_name__ ,__magic_name__ : Union[str, Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__magic_name__ ,__magic_name__ : str = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 )
__magic_name__ : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__magic_name__ : Optional[Any] = torch.cat([half_eps, half_eps] , dim=0 )
__magic_name__ : List[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__magic_name__ ,__magic_name__ : Tuple = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 )
else:
__magic_name__ : Tuple = noise_pred
# compute previous image: x_t -> x_t-1
__magic_name__ : str = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
if guidance_scale > 1:
__magic_name__ ,__magic_name__ : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
__magic_name__ : int = latent_model_input
__magic_name__ : Tuple = 1 / self.vae.config.scaling_factor * latents
__magic_name__ : List[str] = self.vae.decode(lowerCAmelCase__ ).sample
__magic_name__ : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__magic_name__ : List[Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__magic_name__ : Tuple = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
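# --- Editor's aside (not part of the original pipeline): the classifier-free guidance arithmetic used in
# the denoising loop above, spelled out on its own. `_demo_cfg` is a made-up name.
def _demo_cfg(cond_eps: torch.Tensor, uncond_eps: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # push the noise prediction away from the unconditional estimate, toward the class-conditional one
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)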
| 324
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__: Any = logging.get_logger(__name__)
__magic_name__: Dict = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = '''roformer'''
def __init__( self , lowerCAmelCase__=5_00_00 , lowerCAmelCase__=None , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=15_36 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-1_2 , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> int:
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : int = vocab_size
__magic_name__ : Optional[Any] = hidden_size if embedding_size is None else embedding_size
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : Tuple = num_attention_heads
__magic_name__ : Tuple = hidden_act
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Tuple = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Union[str, Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : str = initializer_range
__magic_name__ : str = layer_norm_eps
__magic_name__ : List[Any] = rotary_value
__magic_name__ : Optional[int] = use_cache
class snake_case__ ( _lowerCAmelCase ):
@property
def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__magic_name__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__ : int = {0: """batch""", 1: """sequence"""}
__magic_name__ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
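# --- Editor's aside (not part of the original configuration file): RoFormer's defining trick is the
# rotary position embedding -- query/key channels are rotated pairwise by a position-dependent angle, so
# attention scores depend only on relative positions. A minimal sketch; `_demo_rotary` is a made-up name.
def _demo_rotary(x, position, theta=10000.0):
    import torch  # local import: this config module does not otherwise depend on torch

    d = x.shape[-1]
    freqs = theta ** (-torch.arange(0, d, 2, dtype=torch.float32) / d)  # one frequency per channel pair
    angles = position * freqs
    xa, xb = x[..., 0::2], x[..., 1::2]
    cos, sin = angles.cos(), angles.sin()
    # rotate each (xa, xb) pair by its angle
    return torch.stack([xa * cos - xb * sin, xa * sin + xb * cos], dim=-1).flatten(-2)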
| 324
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class __lowerCamelCase :
'''simple docstring'''
def a_ ( self , a__ , a__ ):
pass
def a_ ( self ):
pass
def a_ ( self ):
pass
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(a__ , a__ )
__SCREAMING_SNAKE_CASE : str = TFVisionTextDualEncoderModel(a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : Tuple = self.get_vision_text_model(a__ , a__ )
__SCREAMING_SNAKE_CASE : List[Any] = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__SCREAMING_SNAKE_CASE : int = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_vision_text_model(a__ , a__ )
__SCREAMING_SNAKE_CASE : Any = {"vision_model": vision_model, "text_model": text_model}
__SCREAMING_SNAKE_CASE : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a__ )
__SCREAMING_SNAKE_CASE : int = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_vision_text_model(a__ , a__ )
__SCREAMING_SNAKE_CASE : str = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__SCREAMING_SNAKE_CASE : Tuple = model(input_ids=a__ , pixel_values=a__ , attention_mask=a__ )
__SCREAMING_SNAKE_CASE : Tuple = after_output[0].numpy()
__SCREAMING_SNAKE_CASE : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1e-5 )
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : int = self.get_vision_text_model(a__ , a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__SCREAMING_SNAKE_CASE : int = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__SCREAMING_SNAKE_CASE : int = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE : Any = to_atuple(vision_model.config.image_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = to_atuple(vision_model.config.patch_size )
__SCREAMING_SNAKE_CASE : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__SCREAMING_SNAKE_CASE : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a_ ( self , a__ , a__ , a__ ):
__SCREAMING_SNAKE_CASE : str = np.abs((a - b) ).max()
self.assertLessEqual(a__ , a__ , f'Difference between torch and flax is {diff} (>= {tol}).' )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
self.check_save_load(**a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**a__ )
@slow
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_pretrained_model_and_inputs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_a(**a__ )
__SCREAMING_SNAKE_CASE : Any = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(a__ )
__SCREAMING_SNAKE_CASE : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(a__ )
__SCREAMING_SNAKE_CASE : Any = model_a(**a__ )
__SCREAMING_SNAKE_CASE : Optional[int] = after_outputs[0].numpy()
__SCREAMING_SNAKE_CASE : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1e-5 )
@require_tf
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
__SCREAMING_SNAKE_CASE : List[str] = 13
__SCREAMING_SNAKE_CASE : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE : str = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE : List[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : Tuple = TFViTModel(a__ , name="vision_model" )
__SCREAMING_SNAKE_CASE : int = TFBertModel(a__ , name="text_model" )
return vision_model, text_model
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = TFViTModelTester(self )
__SCREAMING_SNAKE_CASE : int = TFBertModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[Any] = vit_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : Any = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__SCREAMING_SNAKE_CASE : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
__SCREAMING_SNAKE_CASE : str = 13
__SCREAMING_SNAKE_CASE : int = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE : str = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a_ ( self , a__ , a__ , a__ , a__ , a__=None , **a__ ):
__SCREAMING_SNAKE_CASE : List[str] = self.get_vision_text_model(a__ , a__ )
__SCREAMING_SNAKE_CASE : Any = TFVisionTextDualEncoderModel(vision_model=a__ , text_model=a__ )
__SCREAMING_SNAKE_CASE : Any = model(
input_ids=a__ , pixel_values=a__ , attention_mask=a__ , output_attentions=a__ )
__SCREAMING_SNAKE_CASE : Any = output.vision_model_output.attentions
self.assertEqual(len(a__ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE : Any = to_atuple(vision_model.config.image_size )
__SCREAMING_SNAKE_CASE : List[str] = to_atuple(vision_model.config.patch_size )
__SCREAMING_SNAKE_CASE : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__SCREAMING_SNAKE_CASE : Optional[int] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__SCREAMING_SNAKE_CASE : Tuple = output.text_model_output.attentions
self.assertEqual(len(a__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : int = TFDeiTModel(a__ , name="vision_model" )
__SCREAMING_SNAKE_CASE : List[str] = TFRobertaModel(a__ , name="text_model" )
return vision_model, text_model
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFRobertaModelTester(self )
__SCREAMING_SNAKE_CASE : Any = vit_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : List[str] = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
__SCREAMING_SNAKE_CASE : Optional[int] = 13
__SCREAMING_SNAKE_CASE : Tuple = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE : str = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE : str = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def a_ ( self , a__ , a__ ):
__SCREAMING_SNAKE_CASE : str = TFCLIPVisionModel(a__ , name="vision_model" )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFBertModel(a__ , name="text_model" )
return vision_model, text_model
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = TFCLIPVisionModelTester(self )
__SCREAMING_SNAKE_CASE : Optional[int] = TFBertModelTester(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : int = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=a__ )
__SCREAMING_SNAKE_CASE : Dict = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
__SCREAMING_SNAKE_CASE : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=a__ , padding=a__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE : str = model(**a__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__SCREAMING_SNAKE_CASE : str = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a__ , atol=1e-3 ) )
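# --- Editor's aside (not part of the original test): the logits asserted above are scaled image-text
# similarities; per-image caption probabilities are one softmax away. `_demo_caption_probs` is a made-up
# helper name.
def _demo_caption_probs(logits_per_image: np.ndarray) -> np.ndarray:
    shifted = logits_per_image - logits_per_image.max(axis=-1, keepdims=True)  # numerical stability
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)  # one row per image, one column per caption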
| 700
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase = logging.get_logger(__name__)
lowercase = '''T5Config'''
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Optional[int] = '''mt5'''
snake_case__ : Dict = MTaConfig
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : List[str] = '''mt5'''
snake_case__ : List[str] = MTaConfig
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Optional[int] = '''mt5'''
snake_case__ : Union[str, Any] = MTaConfig
| 564
| 0
|
"""simple docstring"""
_SCREAMING_SNAKE_CASE = range(2, 20 + 1)
_SCREAMING_SNAKE_CASE = [10**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE = {}
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
__snake_case = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) )
__snake_case = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) )
__snake_case = 0, 0
__snake_case = n - i
__snake_case = memo.get(SCREAMING_SNAKE_CASE )
if sub_memo is not None:
__snake_case = sub_memo.get(SCREAMING_SNAKE_CASE )
if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
__snake_case = -1
for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__snake_case = _k
break
if max_jump >= 0:
__snake_case = jumps[max_jump]
# since the difference between jumps is cached, add c
__snake_case = diff + c
for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
__snake_case = divmod(SCREAMING_SNAKE_CASE , 10 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__snake_case = []
else:
__snake_case = {c: []}
__snake_case = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__snake_case = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__snake_case = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
__snake_case = sub_memo[c]
# keep jumps sorted by # of terms skipped
__snake_case = 0
while j < len(SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__snake_case = i
__snake_case = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__snake_case = ds_c + ds_b
diff += addend
__snake_case = 0
for j in range(SCREAMING_SNAKE_CASE ):
__snake_case = a_i[j] + addend
__snake_case = divmod(SCREAMING_SNAKE_CASE , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return diff, i - start_i
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
__snake_case = digits[j] + addend
if s >= 10:
__snake_case = divmod(SCREAMING_SNAKE_CASE , 10 )
__snake_case = addend // 10 + quotient
else:
__snake_case = s
__snake_case = addend // 10
if addend == 0:
break
while addend > 0:
__snake_case = divmod(SCREAMING_SNAKE_CASE , 10 )
digits.append(SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = 10**15 ) -> List[str]:
"""simple docstring"""
__snake_case = [1]
__snake_case = 1
__snake_case = 0
while True:
__snake_case = next_term(SCREAMING_SNAKE_CASE , 20 , i + dn , SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
__snake_case = 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 10**j
return a_n
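# --- Editor's aside (not part of the original solution): a brute-force reference for the same sequence,
# a(n+1) = a(n) + digitsum(a(n)) with a(1) = 1, feasible only for small n. `_naive_solution` is a made-up
# name; the optimized code above exists precisely because this loop is hopeless for n = 10**15.
def _naive_solution(n: int) -> int:
    a_n = 1
    for _ in range(1, n):
        a_n += sum(int(d) for d in str(a_n))  # add the digit sum of the current term
    return a_n  # e.g. n = 6 walks 1, 2, 4, 8, 16, 23 and returns 23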
if __name__ == "__main__":
print(F"""{solution() = }""")
| 163
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Union[str, Any] = """gpt_neo"""
__magic_name__ : Union[str, Any] = ["""past_key_values"""]
__magic_name__ : Dict = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Dict , UpperCamelCase__ : List[Any]=50257 , UpperCamelCase__ : Optional[Any]=2048 , UpperCamelCase__ : Tuple=2048 , UpperCamelCase__ : int=24 , UpperCamelCase__ : Dict=[[["global", "local"], 12]] , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=256 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=1E-5 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=50256 , UpperCamelCase__ : List[str]=50256 , **UpperCamelCase__ : str , ):
A__ : Optional[Any] =vocab_size
A__ : Dict =max_position_embeddings
A__ : List[str] =hidden_size
A__ : List[Any] =num_layers
A__ : Tuple =num_heads
A__ : List[str] =intermediate_size
A__ : Tuple =window_size
A__ : Dict =activation_function
A__ : str =resid_dropout
A__ : Union[str, Any] =embed_dropout
A__ : List[str] =attention_dropout
A__ : Tuple =classifier_dropout
A__ : int =layer_norm_epsilon
A__ : int =initializer_range
A__ : str =use_cache
A__ : Tuple =bos_token_id
A__ : int =eos_token_id
A__ : int =attention_types
A__ : Any =self.expand_attention_types_params(UpperCamelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
@staticmethod
def _UpperCAmelCase ( UpperCamelCase__ : List[str] ):
A__ : Optional[Any] =[]
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
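# --- Editor's aside (not part of the original file): a standalone, made-up equivalent of the expansion
# above. With the default attention_types=[[["global", "local"], 12]] it yields 24 per-layer entries
# alternating global, local, ... -- one per hidden layer.
def _demo_expand_attention_types(attention_types):
    attentions = []
    for pattern, repeat in attention_types:
        for _ in range(repeat):
            attentions.extend(pattern)
    return attentions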
def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
"""simple docstring"""
import torch
A__ : List[str] =input.size()
A__ : Dict =len(UpperCamelCase )
A__ : Optional[int] =shape[dimension]
A__ : str =torch.arange(0 , UpperCamelCase , UpperCamelCase )
A__ : Optional[int] =torch.div(sizedim - size , UpperCamelCase , rounding_mode="floor" ) + 1
A__ : str =torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None]
A__ : Tuple =[slice(UpperCamelCase )] * rank
A__ : int =indices
A__ : Optional[int] =input[s]
A__ : Union[str, Any] =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase )
def lowercase ( UpperCamelCase : str , UpperCamelCase : Any ):
"""simple docstring"""
import torch
A__ : List[str] =torch.arange(1 , UpperCamelCase )
A__ : List[Any] =torch.remainder(UpperCamelCase , UpperCamelCase )
A__ : Optional[int] =remainders == 0
A__ : str =candidates[divisor_indices]
A__ : int =torch.max(UpperCamelCase )
return largest_divisor, torch.div(UpperCamelCase , UpperCamelCase , rounding_mode="floor" )
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
@property
def _UpperCAmelCase ( self : List[Any] ):
A__ : Optional[int] =OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
A__ : Optional[int] ={0: "batch", 1: "past_sequence + sequence"}
else:
A__ : Tuple ={0: "batch", 1: "sequence"}
return common_inputs
@property
def _UpperCAmelCase ( self : List[str] ):
return self._config.num_heads
def _UpperCAmelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A__ : Union[str, Any] =super(UpperCamelCase__ , self ).generate_dummy_inputs(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
# We need to order the input in the way they appears in the forward()
A__ : List[Any] =OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ : Union[str, Any] =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A__ : Union[str, Any] =seqlen + 2
A__ : List[Any] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : Optional[Any] =[
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(self.num_layers )
]
A__ : Optional[Any] =common_inputs["attention_mask"]
if self.use_past:
A__ : Any =ordered_inputs["attention_mask"].dtype
A__ : Tuple =torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
return ordered_inputs
@property
def _UpperCAmelCase ( self : List[str] ):
return 13
| 656
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _snake_case :
def __init__( self : int , UpperCAmelCase : int ):
__lowerCamelCase : List[Any] = num_of_nodes
__lowerCamelCase : list[list[int]] = []
__lowerCamelCase : dict[int, int] = {}
def lowerCamelCase__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCamelCase__ ( self : int , UpperCAmelCase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowerCamelCase : int = self.find_component(UpperCAmelCase )
def lowerCamelCase__ ( self : int , UpperCAmelCase : list[int] , UpperCAmelCase : int , UpperCAmelCase : int ):
if component_size[u_node] <= component_size[v_node]:
__lowerCamelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
__lowerCamelCase : Optional[int] = self.find_component(UpperCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : List[Any] = []
__lowerCamelCase : Tuple = 0
__lowerCamelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowerCamelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = edge
__lowerCamelCase : Optional[int] = self.m_component[u]
__lowerCamelCase : Union[str, Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowerCamelCase : Dict = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Dict = edge
__lowerCamelCase : str = self.m_component[u]
__lowerCamelCase : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowerCamelCase : Any = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def lowercase_ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
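# --- Editor's aside (not part of the original file): a hedged usage sketch of Boruvka's algorithm above.
# The class and method names are auto-generated in this dump, so `Graph`, `add_edge` and `boruvka` below
# are stand-ins for the constructor, the edge-adding method and the MST method respectively.
#
# graph = Graph(3)          # 3 nodes
# graph.add_edge(0, 1, 5)
# graph.add_edge(1, 2, 10)
# graph.add_edge(0, 2, 1)
# graph.boruvka()           # each component merges along its cheapest outgoing edge; MST weight = 6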
| 366
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowercase_ ( _lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowercase_ ( _lowerCamelCase: Union[str, Any] ) -> Any:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def lowercase_ ( session: Union[str, Any] , exitstatus: Tuple ) -> Dict:
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
__A = doctest.register_optionflag('''IGNORE_RESULT''')
__A = doctest.OutputChecker
class _snake_case ( a__ ):
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : int ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__A = CustomOutputChecker
__A = HfDoctestModule
__A = HfDocTestParser
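# --- Editor's aside (not part of the original conftest): the custom flag registered above lets a doctest
# run a statement while skipping the output comparison, e.g.
#
# >>> import random
# >>> random.random()  # doctest: +IGNORE_RESULT
# 0.123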
| 366
| 1
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='''Model not available yet''')
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        # collect every method that was tagged by `mark`/`mark_multiple`
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            # remember which key triggered the handler before dispatching
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
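# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# `mark` records the key a method handles and `register` rebuilds the class through the
# KeyHandler metaclass, which wires up `handle_input` for dispatch. The demo class
# below is hypothetical.
@register
class _DemoMenu:
    @mark("q")
    def quit(cls):
        # returned by _DemoMenu.handle_input() when the user presses "q"
        return "quit"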
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
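# --- Hedged usage sketch (added; not in the original file) ---
# How these helpers typically compose in an interactive configuration flow; the prompt
# text below is illustrative, not taken from the original source.
#
#   num_machines = _ask_field(
#       "How many different machines will you use? [1]: ",
#       convert_value=int,
#       default=1,
#       error_message="Please enter an integer.",
#   )
#   distributed_type = _ask_options(
#       "Which type of machine are you using?",
#       ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "TPU"],
#       _convert_distributed_mode,
#   )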
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =EsmModel(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case , attention_mask=_snake_case )
_UpperCAmelCase =model(_snake_case )
_UpperCAmelCase =model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =EsmForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =self.num_labels
_UpperCAmelCase =EsmForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
_UpperCAmelCase =model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase =type
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase =EsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()[0]
_UpperCAmelCase =EsmEmbeddings(config=_snake_case )
_UpperCAmelCase =torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_UpperCAmelCase =torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_UpperCAmelCase =create_position_ids_from_input_ids(_snake_case , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_snake_case , _snake_case ) ) )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.model_tester.prepare_config_and_inputs()[0]
_UpperCAmelCase =EsmEmbeddings(config=_snake_case )
_UpperCAmelCase =torch.empty(2 , 4 , 30 )
_UpperCAmelCase =[
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_UpperCAmelCase =torch.as_tensor([expected_single_positions, expected_single_positions] )
_UpperCAmelCase =embeddings.create_position_ids_from_inputs_embeds(_snake_case )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_snake_case , _snake_case ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
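# --- Hedged illustration (added; not part of the original tests) ---
# The padding-aware position ids exercised by the embedding tests above follow the
# pattern `cumsum(mask) * mask + padding_idx`: real tokens count up from
# padding_idx + 1 while pad positions stay pinned at padding_idx.
if is_torch_available():
    _demo_ids = torch.tensor([[5, 6, 7, 1]])  # last token is the pad (padding_idx = 1)
    _demo_mask = _demo_ids.ne(1).int()
    _demo_positions = torch.cumsum(_demo_mask, dim=1) * _demo_mask + 1
    # _demo_positions -> tensor([[2, 3, 4, 1]])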
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a(self : int , snake_case : Optional[Any] , snake_case : Tuple , snake_case : List[str] , snake_case : Tuple , snake_case : Any , snake_case : Dict ) -> Optional[int]:
_lowercase : Optional[int] = DistilBertModel(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : List[Any] = model(snake_case , snake_case )
_lowercase : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a(self : int , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any] ) -> Dict:
_lowercase : Optional[int] = DistilBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a(self : Tuple , snake_case : List[str] , snake_case : Any , snake_case : List[str] , snake_case : Dict , snake_case : str , snake_case : str ) -> Any:
_lowercase : Dict = DistilBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : List[str] = model(
snake_case , attention_mask=snake_case , start_positions=snake_case , end_positions=snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a(self : Union[str, Any] , snake_case : str , snake_case : Dict , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Dict ) -> Dict:
_lowercase : str = self.num_labels
_lowercase : Any = DistilBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
_lowercase : Optional[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a(self : int , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : str ) -> str:
_lowercase : str = self.num_labels
_lowercase : List[str] = DistilBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : str = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a(self : List[str] , snake_case : int , snake_case : str , snake_case : Union[str, Any] , snake_case : Dict , snake_case : int , snake_case : Union[str, Any] ) -> Optional[Any]:
_lowercase : str = self.num_choices
_lowercase : Dict = DistilBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Tuple = model(
snake_case , attention_mask=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
def _a(self : int ) -> List[str]:
self.config_tester.run_common_tests()
def _a(self : Optional[Any] ) -> Optional[int]:
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*snake_case )
def _a(self : Any ) -> int:
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*snake_case )
def _a(self : Dict ) -> List[Any]:
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*snake_case )
def _a(self : str ) -> Tuple:
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*snake_case )
def _a(self : Any ) -> List[Any]:
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*snake_case )
def _a(self : Optional[int] ) -> Optional[int]:
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*snake_case )
@slow
def _a(self : Optional[Any] ) -> Dict:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Tuple = DistilBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@slow
@require_torch_gpu
def _a(self : Optional[int] ) -> Optional[int]:
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowercase : str = True
_lowercase : Tuple = model_class(config=snake_case )
_lowercase : str = self._prepare_for_class(snake_case , snake_case )
_lowercase : Optional[int] = torch.jit.trace(
snake_case , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case , os.path.join(snake_case , "traced_model.pt" ) )
_lowercase : Dict = torch.jit.load(os.path.join(snake_case , "traced_model.pt" ) , map_location=snake_case )
loaded(inputs_dict["input_ids"].to(snake_case ) , inputs_dict["attention_mask"].to(snake_case ) )
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
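# --- Hedged sketch (added; not part of the original tests) ---
# The TorchScript round trip exercised by the GPU test above, reduced to its core
# pattern. `model`, `input_ids` and `attention_mask` are placeholders.
#
#   traced = torch.jit.trace(model, (input_ids.to("cpu"), attention_mask.to("cpu")))
#   torch.jit.save(traced, "traced_model.pt")
#   loaded = torch.jit.load("traced_model.pt", map_location=torch_device)
#   loaded(input_ids.to(torch_device), attention_mask.to(torch_device))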
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
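# Hedged note (added; not part of the original module): replacing
# `sys.modules[__name__]` with a `_LazyModule` defers the heavy torch/TF imports
# listed above until an attribute is first accessed, e.g.:
#
#   import transformers.models.transfo_xl as transfo_xl  # cheap: no torch import yet
#   transfo_xl.TransfoXLConfig                            # attribute access triggers the real import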
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
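# --- Hedged usage sketch (added; not part of the original file) ---
# Constructing the config with overrides and hitting the router_dtype validation
# above. Values are illustrative.
#
#   config = NllbMoeConfig(num_experts=8, expert_capacity=16)
#   NllbMoeConfig(router_dtype="int8")  # raises ValueError per the check above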
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> int:
"""simple docstring"""
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase = _calculate(days - 1 , UpperCAmelCase_ , 0 )
UpperCamelCase = state_late + state_absent + state_ontime
UpperCamelCase = prizestrings
return prizestrings
def lowerCamelCase__ ( UpperCAmelCase_ = 30 )-> int:
"""simple docstring"""
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
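# Worked check (added; not part of the original file): the Project Euler 191 statement
# counts 43 prize strings among the 3**4 = 81 possible 4-day strings, which this
# recursion reproduces:
#
#   >>> _calculate(4, absent=0, late=0)
#   43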
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a Minimum Spanning Tree (MST) of a graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
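# --- Hedged usage sketch (added; not part of the original file) ---
# Building a small graph and extracting its MST; edge labels and weights are
# illustrative.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 3)
    mst = g.kruskal()
    # the weight-3 edge is skipped; only a-b (1) and b-c (2) survive
    print(mst.connections)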
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
_a : Optional[Any] = [True] * 1_000_001
_a : Optional[Any] = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_a : Optional[int] = False
i += 1
def a__ ( a : int ):
"""simple docstring"""
return seive[n]
def a__ ( a : int ):
"""simple docstring"""
return any(digit in "02468" for digit in str(a ) )
def a__ ( a : int = 1_000_000 ):
"""simple docstring"""
_snake_case : Union[str, Any] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(a ) and not contains_an_even_digit(a ):
_snake_case : Any = str(a )
_snake_case : int = [int(str_num[j:] + str_num[:j] ) for j in range(len(a ) )]
if all(is_prime(a ) for i in list_nums ):
result.append(a )
return result
def a__ ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
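# Worked check (added; not part of the original file): below 100 the circular primes
# are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, so:
#
#   >>> len(find_circular_primes(100))
#   13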
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not fail

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
args = parser.parse_args()
txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
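# Example invocation (added for illustration; the script filename and dump path are
# hypothetical, the checkpoint name is the default defined above):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./karlo-image-variation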
| 176
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''image_processing_pix2struct'''] = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_pix2struct'''] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
    PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    Pix2StructConfig,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
    PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
    Pix2StructForConditionalGeneration,
    Pix2StructPreTrainedModel,
    Pix2StructTextModel,
    Pix2StructVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 176
| 1
|
'''simple docstring'''
def base16_encode( data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
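    # Added round-trip check (illustrative): "Hello" is 48 65 6C 6C 6F in ASCII.
    assert base16_encode(b'Hello' ) == '48656C6C6F'
    assert base16_decode('48656C6C6F' ) == b'Hello'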
| 707
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a_ ( unittest.TestCase , ToolTesterMixin ):
def setUp(self) -> None:
    """simple docstring"""
    self.tool = load_tool('text-to-speech')
    self.tool.setup()
def test_exact_match_arg(self) -> None:
    """simple docstring"""
    torch.manual_seed(0)
    result = self.tool('hey')
    resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
def test_exact_match_kwarg(self) -> None:
    """simple docstring"""
    torch.manual_seed(0)
    result = self.tool(text='hey')
    resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
| 61
| 0
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
        benchmark = TensorFlowBenchmark(args=benchmark_args )
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
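# Example invocation (added for illustration; the flag values are placeholders, and the
# exact flags depend on the fields of TensorFlowBenchmarkArguments):
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128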
| 353
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
    @staticmethod
    def open( *args , **kwargs ) -> None:
        """simple docstring"""
        pass
@is_pipeline_test
@require_vision
class __magic_name__ ( unittest.TestCase ):
@require_torch
def test_small_model_pt( self ) -> None:
    """simple docstring"""
    image_classifier = pipeline(
        model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
    self.assertIn(
        nested_simplify(output ) , [
            [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
            [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
        ] , )
    output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
    self.assertEqual(
        nested_simplify(output ) , [
            [
                {'''score''': 0.3_33, '''label''': ANY(str )},
                {'''score''': 0.3_33, '''label''': ANY(str )},
                {'''score''': 0.3_33, '''label''': ANY(str )},
            ],
        ]
        * 5 , )
@require_tf
def test_small_model_tf( self ) -> None:
    """simple docstring"""
    image_classifier = pipeline(
        model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    output = image_classifier(image , candidate_labels=['''a''', '''b''', '''c'''] )
    self.assertEqual(
        nested_simplify(output ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
    output = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
    self.assertEqual(
        nested_simplify(output ) , [
            [
                {'''score''': 0.3_33, '''label''': ANY(str )},
                {'''score''': 0.3_33, '''label''': ANY(str )},
                {'''score''': 0.3_33, '''label''': ANY(str )},
            ],
        ]
        * 5 , )
@slow
@require_torch
def test_large_model_pt( self ) -> None:
    """simple docstring"""
    image_classifier = pipeline(
        task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
    # This is an image of 2 cats with remotes and no planes
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
    self.assertEqual(
        nested_simplify(output ) , [
            {'''score''': 0.5_11, '''label''': '''remote'''},
            {'''score''': 0.4_85, '''label''': '''cat'''},
            {'''score''': 0.0_04, '''label''': '''plane'''},
        ] , )
    output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
    self.assertEqual(
        nested_simplify(output ) , [
            [
                {'''score''': 0.5_11, '''label''': '''remote'''},
                {'''score''': 0.4_85, '''label''': '''cat'''},
                {'''score''': 0.0_04, '''label''': '''plane'''},
            ],
        ]
        * 5 , )
@slow
@require_tf
def test_large_model_tf( self ) -> None:
    """simple docstring"""
    image_classifier = pipeline(
        task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
    # This is an image of 2 cats with remotes and no planes
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    output = image_classifier(image , candidate_labels=['''cat''', '''plane''', '''remote'''] )
    self.assertEqual(
        nested_simplify(output ) , [
            {'''score''': 0.5_11, '''label''': '''remote'''},
            {'''score''': 0.4_85, '''label''': '''cat'''},
            {'''score''': 0.0_04, '''label''': '''plane'''},
        ] , )
    output = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
    self.assertEqual(
        nested_simplify(output ) , [
            [
                {'''score''': 0.5_11, '''label''': '''remote'''},
                {'''score''': 0.4_85, '''label''': '''cat'''},
                {'''score''': 0.0_04, '''label''': '''plane'''},
            ],
        ]
        * 5 , )
| 353
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False ):
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ):
    '''simple docstring'''
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
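# Added illustrative check: a force of magnitude 10 at 90 degrees is purely
# vertical, so polar_force(10, 90) is approximately [0.0, 10.0].
print(polar_force(10, 90))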
| 339
| 0
|
"""simple docstring"""
import math
def solution (n = 100 ):
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
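    # Added example: the sum of squares up to 10 is 385 and the square of the sum
    # is 3025, so solution(10) returns 3025 - 385 = 2640.
    print(f'''{solution(10) = }''')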
| 102
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=5_0_4_0_0 , n_positions=2_0_4_8 , n_embd=4_0_9_6 , n_layer=2_8 , n_head=1_6 , rotary_dim=6_4 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , tie_word_embeddings=False , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig ( OnnxConfigWithPast ):
    """simple docstring"""

    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        '''simple docstring'''
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs ( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers ( self ):
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads ( self ):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        common_inputs = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
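                # i.e. every past key/value tensor gets the shape
                # (batch, num_attention_heads, past_sequence_length, head_dim)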
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset ( self ):
        '''simple docstring'''
        return 1_3
| 102
| 1
|
import math
import sys
def minimum_squares_to_represent_a_number( number : int ):
    if number != int(number ):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
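    # Added example: 12 = 4 + 4 + 4, so three perfect squares suffice.
    print(minimum_squares_to_represent_a_number(12 ))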
| 718
|
"""simple docstring"""
import base64


def base85_encode ( string : str ) -> bytes:
    return base64.a85encode(string.encode('''utf-8''' ) )


def base85_decode ( a85encoded : bytes ) -> str:
    return base64.a85decode(a85encoded ).decode('''utf-8''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
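    # Added round-trip check: Ascii85 decoding inverts encoding.
    assert base85_decode(base85_encode('''Hello''' ) ) == '''Hello'''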
| 439
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __lowerCamelCase ( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
    def setUp( self ) -> None:
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()
    def test_exact_match_arg( self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg( self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool(text="hey" )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 61
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> Any:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ) -> str:
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(sub_texts ) )
        else:
            text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
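# Illustrative note (added): with the helpers above, a single sequence is encoded as
# [CLS] A [SEP] and a sequence pair as [CLS] A [SEP] B [SEP], with token type ids
# 0 for the first segment and 1 for the second.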
| 61
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig (PretrainedConfig ):
    """simple docstring"""

    model_type = 'llama'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
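# Added illustrative example of a setting that passes the validation above:
#   config = LlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0})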
| 720
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig (PretrainedConfig ):
    """simple docstring"""

    model_type = 'roformer'
    def __init__( self , vocab_size=5_00_00 , embedding_size=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=15_36 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig (OnnxConfig ):
    """simple docstring"""

    @property
    def inputs ( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 521
| 0
|
from __future__ import annotations
import math
class SegmentTree:
    '''simple docstring'''

    def __init__( self , size ):
        '''simple docstring'''
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update

    def left( self , idx ):
        '''simple docstring'''
        return idx * 2

    def right( self , idx ):
        '''simple docstring'''
        return idx * 2 + 1

    def build( self , idx , left_element , right_element , a ):
        '''simple docstring'''
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )

    def update( self , idx , left_element , right_element , a , b , val ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True

    def query( self , idx , left_element , right_element , a , b ):
        '''simple docstring'''
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )

    def __str__( self ):
        '''simple docstring'''
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
size = 15
segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
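# Expected demo output (added note, derived from the array above): 7 for positions
# 4..6, 14 for 7..11, 15 for 7..12, and 111 for 1..15 after update(1, 3, 111).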
| 36
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt (prompt_or_repo_id , agent_name , mode="run" ):
    """simple docstring"""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
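# Added illustrative call (the agent name is a placeholder):
#   download_prompt(None, '''my-agent''', mode='''chat''')
# would fetch chat_prompt_template.txt from the default prompts dataset above.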
| 404
| 0
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
def parse_args():
snake_case__ : Any = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=A , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=A , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=A , type=A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=A , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
return args
def tokenize_function( tokenizer ):
    def fn(examples ):
        return tokenizer(examples['text'] )
    return fn
def get_serialized_examples( tokenized_data ):
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
return records
def main( args ):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_0_0_0 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f:
        print(f'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
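# Example invocation (added for illustration; the script filename is hypothetical, the
# flags are the ones defined in parse_args above):
#   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --output_dir tf-tpu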
| 303
|
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        self.node_position = []

    def get_position( self , vertex ):
        return self.node_position[vertex]

    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )

    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
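    # Added illustrative session: entering 3 edges "0 1 5", "1 2 3" and "0 2 1"
    # yields the spanning tree edges [(0, 2), (2, 1)] with total weight 4.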
| 303
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Union[str, Any] = logging.get_logger(__name__)
def get_dpt_config ( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''patch_embeddings''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
    return name
def read_in_q_k_v ( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img ( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    '''simple docstring'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model to hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
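
# Example invocation (the script file name and output directory are illustrative assumptions,
# not part of the original source):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large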
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
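
# Note on the pattern above (an explanatory aside, not part of the original module): routing the
# package through _LazyModule means that `from transformers.models.clap import ClapModel` defers
# the torch-dependent import of modeling_clap until the attribute is first accessed.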
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__a: str = get_tests_dir("""fixtures""")
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
'''simple docstring'''
from __future__ import annotations


def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
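    # Quick illustrative check (the grid below is an assumed example, not from the original
    # file): with the centre cell blocked, the only simple paths from (0, 0) to (2, 2) run
    # around the border, clockwise or counterclockwise, so the expected count is 2.
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    assert depth_first_search(example_grid, 0, 0, set()) == 2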
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
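
# Worked example: combinations(5, 2) == 5! // (2! * 3!) == 10.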
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
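
# Illustrative values (a note, not from the original file): with the default scale_factor of 8,
# downscale_height_and_width(768, 768) returns (96, 96) -- the pixel-space size mapped onto the
# movq latent grid, rounded up to the next multiple of 8 when the input is not divisible by 64.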
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
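
# Shape sketch (assuming any RGB-convertible PIL image as input): prepare_image(pil_image)
# returns a tensor of shape [1, 3, 512, 512] with pixel values rescaled from [0, 255] to [-1, 1].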
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
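
    # Worked example (illustrative numbers, not from the original file): with
    # num_inference_steps=100 and strength=0.3, init_timestep is 30 and t_start is 70, so only
    # the last 30 scheduler timesteps run -- img2img starts from a partially-noised input image.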
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
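
# Usage sketch (an illustrative aside; the keyword values are assumptions, not defaults):
# DeformableDetrConfig(two_stage=True, with_box_refine=True).to_dict()["model_type"]
# evaluates to "deformable_detr".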
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function for verifying the batch sizes coming from a prepared dataloader in each process
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1]
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2]
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1]
    )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1]
    )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def snake_case ( ) -> Dict:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = create_accelerator(even_batches=snake_case )
lowerCAmelCase = torch.nn.Linear(1 , 1 )
lowerCAmelCase = accelerator.prepare(snake_case )
lowerCAmelCase = create_dataloader(snake_case , dataset_size=3 , batch_size=1 )
lowerCAmelCase = create_dataloader(snake_case , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=snake_case ):
lowerCAmelCase = train_dl.batch_sampler.even_batches
lowerCAmelCase = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def snake_case ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = create_accelerator(even_batches=snake_case )
lowerCAmelCase = torch.nn.Linear(1 , 1 )
lowerCAmelCase = accelerator.prepare(snake_case )
create_dataloader(snake_case , dataset_size=3 , batch_size=1 , iterable=snake_case )
lowerCAmelCase = create_dataloader(snake_case , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=snake_case ):
lowerCAmelCase = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = create_accelerator()
lowerCAmelCase = torch.nn.Linear(1 , 1 )
lowerCAmelCase = accelerator.prepare(snake_case )
create_dataloader(snake_case , dataset_size=3 , batch_size=1 , iterable=snake_case )
with warnings.catch_warnings(record=snake_case ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=snake_case ):
pass
assert issubclass(w[-1].category , snake_case )
assert "only supported for map-style datasets" in str(w[-1].message )
def snake_case ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
lowerCAmelCase = accelerator.state.distributed_type
lowerCAmelCase = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(snake_case )
lowerCAmelCase = original_state
if __name__ == "__main__":
main()
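
# Illustrative sketch (added for clarity; not part of the accelerate test script):
# how `even_batches` changes the per-process batch sizes checked above.
def expected_batch_sizes(dataset_size, num_processes, batch_size, even_batches):
    # each process receives every num_processes-th sample
    per_process = [dataset_size // num_processes] * num_processes
    for i in range(dataset_size % num_processes):
        per_process[i] += 1
    if even_batches:
        # samples are duplicated so that every process sees the same sample count
        per_process = [max(per_process)] * num_processes
    return [
        [min(batch_size, n - start) for start in range(0, n, batch_size)]
        for n in per_process
    ]

assert expected_batch_sizes(3, 2, 1, even_batches=True) == [[1, 1], [1, 1]]
assert expected_batch_sizes(3, 2, 1, even_batches=False) == [[1, 1], [1]]
assert expected_batch_sizes(7, 2, 2, even_batches=False) == [[2, 2], [2, 1]]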
| 514
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    'kwargs, expected',
    [
        ({'num_shards': 0, 'max_num_jobs': 1}, []),
        ({'num_shards': 10, 'max_num_jobs': 1}, [range(10)]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1) for i in range(10)]),
        ({'num_shards': 1, 'max_num_jobs': 10}, [range(1)]),
        ({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    'gen_kwargs, max_num_jobs, expected',
    [
        ({'foo': 0}, 10, [{'foo': 0}]),
        ({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
        ({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
        ({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
        ({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    'gen_kwargs, expected',
    [
        ({'foo': 0}, 1),
        ({'shards': [0]}, 1),
        ({'shards': [0, 1, 2, 3]}, 4),
        ({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
        ({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
        ({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
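
# Rough sketch (an assumption about the behaviour under test, not datasets'
# actual implementation): num_shards shards are split into at most max_num_jobs
# contiguous ranges, with earlier jobs absorbing the remainder.
def distribute_shards_sketch(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    shards_per_job = [num_shards // num_jobs] * num_jobs if num_jobs else []
    for i in range(num_shards % num_jobs if num_jobs else 0):
        shards_per_job[i] += 1
    out, start = [], 0
    for n in shards_per_job:
        out.append(range(start, start + n))
        start += n
    return out

assert distribute_shards_sketch(0, 1) == []
assert distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]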
| 514
| 1
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__A = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
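
# Usage sketch (illustrative; assumes this module lives inside diffusers, whose
# schedulers subclass the mixin above):
#
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("google/ddpm-cifar10-32")
#   print([cls.__name__ for cls in scheduler.compatibles])  # resolved by _get_compatibles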
| 68
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    tokenizer = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')

# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
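
# Loading sketch (illustrative; assumes the artifacts were uploaded as noted above):
#
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#   out = model.generate(**tokenizer(["Making tiny model"], return_tensors="pt"))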
| 296
| 0
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
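
# Small illustration (added; not part of the original test): a trailing "@@"
# marks a subword that continues into the next token, so detokenization simply
# strips the "@@ " joints.
def detokenize_bpe(tokens):
    return " ".join(tokens).replace("@@ ", "")

assert detokenize_bpe(["T@@", "ô@@", "i"]) == "Tôi"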
| 719
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Compute the Levenshtein edit distance between two words, top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
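
# Worked example (illustrative): the classic pair "intention" -> "execution"
# needs 5 edits, i.e. min_distance_up_bottom("intention", "execution") == 5.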
| 279
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
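
# Quick usage note (illustrative): the processor resizes the short edge to
# size["shortest_edge"], centre-crops to crop_size, and returns pixel_values
# shaped (batch, num_channels, crop_height, crop_width), as the tests assert.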
| 168
|
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a maze of 0s (open cells) and 1s (walls), printing the path if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively search from (i, j) towards the bottom-right cell."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
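
# Example run (illustrative; 0 = open cell, 1 = wall):
#
#   maze = [
#       [0, 1, 0],
#       [0, 0, 0],
#       [1, 1, 0],
#   ]
#   solve_maze(maze)  # prints the 0/1 path matrix and returns True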
| 168
| 1
|
'''simple docstring'''
from math import pow, sqrt
def validate(*values):
    """All inputs must be present and strictly positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Convention used below (the distinct parameter names were lost in this dump):
# gas 1 is the faster gas, so rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
def effusion_ratio(molar_mass_1, molar_mass_2):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass values must greater than 0.')
    )


def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )
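
# Worked example (illustrative, approximate): hydrogen (2.016 g/mol) effuses
# about four times faster than oxygen (31.998 g/mol), since
# effusion_ratio(2.016, 31.998) == sqrt(31.998 / 2.016) ~= 3.984.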
| 715
|
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
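
# Minimal shape check (illustrative; the dimensions below are arbitrary assumptions):
#
#   block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#   hidden_states = torch.randn(2, 77, 64)   # (batch, sequence, dim)
#   out = block(hidden_states)               # residual blocks keep the shape: (2, 77, 64)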
| 418
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = 'deberta-v2'

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act='gelu',
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]
            )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
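
# Usage sketch (illustrative; the task name and sizes are assumptions):
#
#   config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
#   onnx_config = DebertaV2OnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # the dynamic axes declared in the property above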
| 594
|
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ : Optional[Any] = TypeVar('''T''')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
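
# Trace of the run above (capacity 4, most recently used on the left):
#   refer('A') -> ['A']
#   refer(2)   -> [2, 'A']
#   refer(3)   -> [3, 2, 'A']
#   refer('A') -> ['A', 3, 2]      # 'A' moves back to the front
#   refer(4)   -> [4, 'A', 3, 2]
#   refer(5)   -> [5, 4, 'A', 3]   # cache is full, least recent key 2 is evicted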
| 594
| 1
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the function tf_top_k_top_p_filtering behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # the mixin consumes these framework-dependent parameters, so they must be gated on TF availability
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text_input = "Hello, my dog is cute and"
        tokens = tokenizer(text_input, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # The fake model below accepts an extra argument "foo"; generate() must filter it
        # out before calling the encoder.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
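
# Serving sketch (illustrative): a SavedModel exported as in the tests above can
# be reloaded and queried without any transformers code on the serving side:
#
#   serving_func = tf.saved_model.load(export_dir).signatures["serving_default"]
#   sequences = serving_func(input_ids=ids, attention_mask=mask)["sequences"]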
| 705
|
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
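
# The resampling above is plain nearest-neighbour: destination pixel (i, j)
# copies source pixel (int(i * src_h / dst_h), int(j * src_w / dst_w)), so
# upscaling repeats pixels and downscaling drops them.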
| 266
| 0
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
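
# Usage sketch outside the test harness (illustrative; argument values assumed):
#
#   pipe = PNDMPipeline(unet=UNet2DModel.from_pretrained("google/ddpm-cifar10-32"), scheduler=PNDMScheduler())
#   image = pipe(num_inference_steps=50, output_type="pil").images[0]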
| 500
|
def bfs(graph, source, sink, parent):
    """Breadth-first search; records the augmenting path in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Edmonds-Karp variant of the Ford-Fulkerson maximum-flow algorithm."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
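# For this classic 6-node example network the printed maximum flow is 23.
# Performance note: a collections.deque with popleft() would make each BFS
# O(V + E) instead of paying O(V) for every list.pop(0).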
| 500
| 1
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder, used to normalize
    image embeddings before noising and to un-normalize them afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
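# A round-trip sanity sketch (tensor values are arbitrary, not from the original file):
# normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
# embeds = torch.randn(2, 768)
# assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-5)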
| 689
|
from __future__ import annotations


def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
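# Non-interactive example: with the default Indian denominations,
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].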
| 689
| 1
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
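# A minimal usage sketch (the checkpoint name is an assumption, not part of this file):
# processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
# inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")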
| 292
|
"""simple docstring"""
from __future__ import annotations
import math
def __a ( a, a ):
"""simple docstring"""
_a = u
for i in range(1, a ):
_a = temp * (u - i)
return temp
def __a ( ):
"""simple docstring"""
_a = int(input("enter the numbers of values: " ) )
_a = []
for _ in range(a ):
y.append([] )
for i in range(a ):
for j in range(a ):
y[i].append(a )
_a = 0
print("enter the values of parameters in a list: " )
_a = list(map(a, input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a ):
_a = float(input() )
_a = int(input("enter the value to interpolate: " ) )
_a = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1, a ):
for j in range(n - i ):
_a = y[j + 1][i - 1] - y[j][i - 1]
_a = y[0][0]
for i in range(1, a ):
summ += (ucal(a, a ) * y[0][i]) / math.factorial(a )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
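# Worked example with assumed inputs x = [0, 1, 2, 3], y = [1, 2, 4, 8], value = 1.5:
# u = 1.5 and the forward differences of y are [1, 2, 4], [1, 2], [1], so
# summ = 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125.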
| 388
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path)
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
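# Example invocation (the script filename and output path are placeholders):
# python convert_upernet_to_pytorch.py --model_name upernet-convnext-tiny \
#     --pytorch_dump_folder_path ./upernet-convnext-tiny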
| 708
|
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->list:
"""simple docstring"""
a_ = False
while is_sorted is False: # Until all the indices are traversed keep looping
a_ = True
for i in range(0 , len(UpperCAmelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
a_ , a_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
a_ = False
for i in range(1 , len(UpperCAmelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
a_ , a_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
a_ = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase_ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase_ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
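# Non-interactive example: odd_even_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5].
# Each even/odd pass compares disjoint pairs, which is why this "brick sort"
# parallelizes well despite its O(n^2) sequential cost.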
| 210
| 0
|
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
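# The two parametrize decorators above expand to 3 x 4 = 12 cases; run them with
# `pytest -q` against this file. is_small_dataset simply compares the dataset size
# to the configured in-memory cap.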
| 251
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 251
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            ".split()
        self.run_and_check(train_args)

        eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            ".split()
        self.run_and_check(entropy_eval_args)
| 718
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
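# Examples: is_pangram() is True for the default sentence, while
# is_pangram("hello world") is False (most letters never appear).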
| 603
| 0
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
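# Quick numeric check (values follow directly from the formulas above): at half
# light speed, beta(149896229.0) == 0.5 and gamma(149896229.0) == 1 / sqrt(0.75) ≈ 1.1547.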
| 301
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
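# A minimal usage sketch (checkpoint name taken from the map above):
# tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# tokenizer("Hello world")["input_ids"]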
| 327
| 0
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
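# Note: this subclass is only a deprecation shim; it changes no behavior, so new code
# should construct transformers.Trainer directly, e.g. Trainer(model=model, args=training_args).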
| 534
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 25506, 'token_str': ' accuser'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 25506,
'token_str': ' accuser',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS'},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 13606, 'token_str': ' Clara'},
] , )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
                {'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
{
'sequence': 'The largest city in France is Paris',
                'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
                'score': 0.214,
'token': 12790,
'token_str': ' Lyon',
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
                {'sequence': 'My name is Clara', 'score': 0.000, 'token': 13606, 'token_str': ' Clara'},
                {'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
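# Outside the test suite the same pipeline is used directly (model name is just an example):
# unmasker = pipeline("fill-mask", model="distilroberta-base")
# unmasker(f"Paris is the {unmasker.tokenizer.mask_token} of France.")  # top candidates with scores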
| 534
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
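# A minimal usage sketch (downloads the sentencepiece model referenced in the map above):
# tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
# tokenizer.tokenize("Hello world")  # e.g. ['▁Hello', '▁world']; exact pieces depend on the spm model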
| 693
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
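# This script is meant to be started by a distributed launcher that sets RANK and
# WORLD_SIZE in the environment, e.g. (the script name here is a placeholder):
# torchrun --nproc_per_node=2 distributed_split_test.py --streaming True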
| 582
| 0
|
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )
    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
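# Non-interactive example: arrival_time = [0, 1, 2] and burst_time = [4, 1, 2]
# give waiting_time [3, 0, 0] under this preemptive shortest-remaining-time policy
# (P1 preempts P0 at t=1, P2 runs next, and P0 finishes last at t=7).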
| 709
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[2, 2, 3, 2] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=10 , _lowerCamelCase=0.0_2 , _lowerCamelCase=["stage2", "stage3", "stage4"] , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Tuple = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_stages
SCREAMING_SNAKE_CASE : str = hidden_sizes
SCREAMING_SNAKE_CASE : List[str] = depths
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = out_features
SCREAMING_SNAKE_CASE : List[Any] = num_labels
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : str = num_stages
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) ->Dict:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self ) ->List[Any]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowerCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowerCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : int = UperNetModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ) ->Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[int]:
return
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
def __lowerCAmelCase ( self ) ->Union[str, Any]:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[str] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = _config_zero_init(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(config=_lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
SCREAMING_SNAKE_CASE : int = Image.open(a__ ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
SCREAMING_SNAKE_CASE : Tuple = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : int = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Any = processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
| 333
| 0
|
from collections.abc import Callable
import numpy as np
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> np.ndarray:
_a = int(np.ceil((x_end - xa) / step_size ) )
_a = np.zeros((n + 1,) )
_a = ya
_a = xa
for k in range(_UpperCamelCase ):
_a = y[k] + step_size * ode_func(_UpperCamelCase , y[k] )
x += step_size
return y
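# Illustration (added, not from the original file): for dy/dx = y with
# y(0) = 1 on [0, 3] and step size 0.01, forward Euler takes 300 steps and
# returns y[-1] = 1.01 ** 300 ≈ 19.79, a slight underestimate of
# e ** 3 ≈ 20.09, which is the expected first-order behaviour of the method.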
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487
|
from jiwer import compute_measures
import datasets
lowerCamelCase :Any = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
lowerCamelCase :Any = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
lowerCamelCase :List[Any] = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def _A ( self: int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def _A ( self: Optional[Any] , __UpperCamelCase: Any=None , __UpperCamelCase: Dict=None , __UpperCamelCase: Tuple=False ):
if concatenate_texts:
return compute_measures(__UpperCamelCase , __UpperCamelCase )["wer"]
else:
_a = 0
_a = 0
for prediction, reference in zip(__UpperCamelCase , __UpperCamelCase ):
_a = compute_measures(__UpperCamelCase , __UpperCamelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
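# Minimal usage sketch (added; illustrative, not part of the metric class
# above). It reproduces the iterative branch by hand, using only the `jiwer`
# call and the result keys the implementation itself relies on:
#
#     from jiwer import compute_measures
#     m = compute_measures("this is the reference", "this is the prediction")
#     wer = (m["substitutions"] + m["deletions"] + m["insertions"]) / (
#         m["substitutions"] + m["deletions"] + m["hits"]
#     )  # (S + D + I) / (S + D + C), exactly the ratio the docstring describes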
| 487
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ):
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def __A ( self ):
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = BioGptModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
A__ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
A__ = BioGptForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ ):
A__ = BioGptModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# create attention mask
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ )
A__ = self.seq_length // 2
A__ = 0
# first forward pass
A__ , A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ = random_other_next_tokens
# append to next input_ids and attn_mask
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , )
# get two different outputs
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
A__ = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ ):
A__ = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval()
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ )
# first forward pass
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
A__ , A__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[
"last_hidden_state"
]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
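# Note (added): the two cache checks above assert that incremental decoding
# with past_key_values reproduces the hidden states of a full forward pass
# over the concatenated sequence, i.e. the KV cache is a pure optimization
# with no numerical effect beyond the 1e-3 tolerance.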
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , UpperCAmelCase__=False ):
A__ = BioGptForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __A ( self , UpperCAmelCase__ , *UpperCAmelCase__ ):
A__ = BioGptModel(UpperCAmelCase__ )
A__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
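# Note (added): the 1 / sqrt(2 * num_hidden_layers) factor mirrors the
# GPT-2-style residual scaling applied to projection ("c_proj") weights,
# which is what the loop below verifies against the checkpoint's state dict.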
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ ):
A__ = self.num_labels
A__ = BioGptForTokenClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self ):
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase : Tuple = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[int] = False
def __A ( self ):
A__ = BioGptModelTester(self )
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase__ , gradient_checkpointing=UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase__ )
def __A ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase__ )
@slow
def __A ( self ):
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(UpperCAmelCase__ )
A__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
A__ = "left"
# Define PAD Token = EOS Token = 50256
A__ = tokenizer.eos_token
A__ = model.config.eos_token_id
# use different length sentences to test batching
A__ = [
"Hello, my dog is a little",
"Today, I",
]
A__ = tokenizer(UpperCAmelCase__ , return_tensors="pt" , padding=UpperCAmelCase__ )
A__ = inputs["input_ids"].to(UpperCAmelCase__ )
A__ = model.generate(
input_ids=UpperCAmelCase__ , attention_mask=inputs["attention_mask"].to(UpperCAmelCase__ ) , )
A__ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ )
A__ = model.generate(input_ids=UpperCAmelCase__ )
A__ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
A__ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCAmelCase__ )
A__ = model.generate(input_ids=UpperCAmelCase__ , max_length=model.config.max_length - num_paddings )
A__ = tokenizer.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
A__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase__ )
A__ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase__ )
A__ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [non_padded_sentence, padded_sentence] )
@slow
def __A ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = BioGptModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __A ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = input_dict["input_ids"]
A__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ = BioGptForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = "multi_label_classification"
A__ = input_dict["input_ids"]
A__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
A__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ = BioGptForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def __A ( self ):
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
A__ = torch.tensor([[2, 4_805, 9, 656, 21]] )
A__ = model(UpperCAmelCase__ )[0]
A__ = 42_384
A__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
A__ = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4 ) )
@slow
def __A ( self ):
A__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(UpperCAmelCase__ )
torch.manual_seed(0 )
A__ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCAmelCase__ )
A__ = model.generate(
**UpperCAmelCase__ , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=UpperCAmelCase__ , )
A__ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase__ )
A__ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 715
|
from math import pi, sqrt
def UpperCamelCase ( _A : float )-> float:
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
if num > 171.5:
raise OverflowError("math range error" )
elif num - int(_A ) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer" )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
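# Worked example (added): the recurrence above unrolls gamma(2.5) as
# 1.5 * gamma(1.5) = 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ≈ 1.3293.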
def UpperCamelCase ( )-> None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ : Optional[Any] = 1.0
while num:
UpperCAmelCase_ : str = float(input("Gamma of: "))
print(F'''gamma({num}) = {gamma(num)}''')
print("\nEnter 0 to exit...")
| 232
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE : Optional[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : str = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
SCREAMING_SNAKE_CASE : Any = spec.loader.load_module()
SCREAMING_SNAKE_CASE : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
SCREAMING_SNAKE_CASE : str = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
SCREAMING_SNAKE_CASE : Dict = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
_lowercase : Any = False
# source code of `config_class`
_lowercase : str = inspect.getsource(lowerCamelCase_ )
_lowercase : Optional[Any] = _re_checkpoint.findall(lowerCamelCase_ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_lowercase , _lowercase : List[str] = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_lowercase : Union[str, Any] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
_lowercase : List[Any] = True
break
_lowercase : List[Any] = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
_lowercase : Union[str, Any] = '\n'.join(sorted(lowerCamelCase_ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 89
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = LEDTokenizer
UpperCamelCase_ = LEDTokenizerFast
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase_ : Tuple = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] ,**lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase_ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Any = tokenizer(lowerCAmelCase__ ,max_length=len(lowerCAmelCase__ ) ,padding=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
lowerCAmelCase_ : int = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
@require_torch
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIn("input_ids" ,lowerCAmelCase__ )
self.assertIn("attention_mask" ,lowerCAmelCase__ )
self.assertNotIn("labels" ,lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" ,lowerCAmelCase__ )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : int = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCAmelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Tuple = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] ,padding=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = ["A long paragraph for summarization."]
lowerCAmelCase_ : Dict = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,return_tensors="pt" )
lowerCAmelCase_ : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ ,return_tensors="pt" )
lowerCAmelCase_ : List[str] = inputs["input_ids"]
lowerCAmelCase_ : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : str = ["Summary of the text.", "Another summary."]
lowerCAmelCase_ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
lowerCAmelCase_ : Optional[int] = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "A, <mask> AllenNLP sentence."
lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
lowerCAmelCase_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 659
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( UpperCamelCase_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __lowercase ( lowercase) -> str:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
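# Illustrative sketch (added): a concrete command would subclass the ABC
# above, wiring itself into an ArgumentParser through the two abstract hooks.
# The names below are assumptions for illustration, not part of this file:
#
#     class EnvCommand(A__):
#         @staticmethod
#         def register_subcommand(subparsers):
#             parser = subparsers.add_parser("env")
#             parser.set_defaults(func=lambda args: EnvCommand())
#         def run(self):
#             print("environment info goes here")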
| 712
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = tempfile.mkdtemp()
a__ : Tuple = 5
# Realm tok
a__ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a__ : Any = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(lowercase , exist_ok=lowercase)
a__ : int = os.path.join(lowercase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
a__ : List[str] = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(lowercase , exist_ok=lowercase)
def __lowercase ( self) -> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : int = RealmConfig(num_block_records=self.num_block_records)
return config
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Tuple = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=lowercase , )
return block_records
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[Any] = self.get_config()
a__ : Tuple = self.get_dummy_retriever()
a__ : Tuple = retriever.tokenizer
a__ : str = np.array([0, 3] , dtype='long')
a__ : Optional[int] = tokenizer(['Test question']).input_ids
a__ : List[str] = tokenizer(
['the fourth'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : str = config.reader_seq_len
a__ , a__ , a__ , a__ : int = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(len(lowercase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : List[str] = self.get_config()
a__ : Union[str, Any] = self.get_dummy_retriever()
a__ : List[Any] = retriever.tokenizer
a__ : Any = np.array([0, 3, 5] , dtype='long')
a__ : Tuple = tokenizer(['Test question']).input_ids
a__ : Optional[Any] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=lowercase , return_token_type_ids=lowercase , return_attention_mask=lowercase , ).input_ids
a__ : Dict = config.reader_seq_len
a__ , a__ , a__ , a__ : Dict = retriever(
lowercase , lowercase , answer_ids=lowercase , max_length=lowercase , return_tensors='np')
self.assertEqual([False, True, True] , lowercase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowercase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
a__ : Optional[int] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , b'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
a__ : str = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
a__ : str = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , b'This is the first record')
| 392
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {"latents"}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowercase__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__ : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
lowercase__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__ : Tuple = CLIPTextModel(_lowercase )
lowercase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
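# Note (added): these deliberately tiny UNet / VAE / CLIP components keep the
# CPU-only tests below fast while still exercising the full CycleDiffusion
# call path end to end.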
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> int:
lowercase__ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
lowercase__ : List[str] = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
lowercase__ : str = torch.manual_seed(_lowercase )
else:
lowercase__ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
lowercase__ : Dict = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Tuple = self.get_dummy_components()
lowercase__ : Optional[int] = CycleDiffusionPipeline(**_lowercase )
lowercase__ : Dict = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(_lowercase )
lowercase__ : Optional[Any] = pipe(**_lowercase )
lowercase__ : Optional[Any] = output.images
lowercase__ : Any = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowercase__ : Dict = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCAmelCase( self ) -> int:
lowercase__ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
lowercase__ : Optional[int] = module.half()
lowercase__ : List[Any] = CycleDiffusionPipeline(**_lowercase )
lowercase__ : Tuple = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowercase__ : List[str] = self.get_dummy_inputs(_lowercase )
lowercase__ : Optional[Any] = pipe(**_lowercase )
lowercase__ : str = output.images
lowercase__ : int = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowercase__ : Tuple = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCAmelCase( self ) -> Optional[Any]:
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowerCAmelCase( self ) -> List[str]:
return super().test_inference_batch_single_identical()
@skip_mps
def _lowerCAmelCase( self ) -> Union[str, Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCAmelCase( self ) -> Tuple:
return super().test_save_load_optional_components()
@skip_mps
def _lowerCAmelCase( self ) -> int:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
lowercase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
lowercase__ : str = init_image.resize((512, 512) )
lowercase__ : str = """CompVis/stable-diffusion-v1-4"""
lowercase__ : Tuple = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
lowercase__ : str = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowercase__ : List[str] = """A black colored car"""
lowercase__ : List[str] = """A blue colored car"""
lowercase__ : Tuple = torch.manual_seed(0 )
lowercase__ : Any = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
lowercase__ : Tuple = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
lowercase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
lowercase__ : Optional[int] = init_image.resize((512, 512) )
lowercase__ : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
lowercase__ : Optional[Any] = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
lowercase__ : Optional[Any] = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowercase__ : Optional[Any] = """A black colored car"""
lowercase__ : int = """A blue colored car"""
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : Dict = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
lowercase__ : Any = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 152
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
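# Note (added): with `_LazyModule` installed as the module object, importing
# this package stays cheap; the heavy torch/flax submodule imports above are
# deferred until one of the exported names is first accessed.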
| 581
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self) -> Any:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self) -> Optional[int]:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15) -> List[str]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", )
    def test_padding_different_model(self) -> Any:
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy ( OpenAIGPTTokenizationTest ):
"""simple docstring"""
pass
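# A quick trace of the toy BPE defined in setUp above (ids follow the vocab
# list order): merges "l o" -> "lo" and "lo w" -> "low" reduce "lower" to
# ["low", "er</w>"], i.e. ids [14, 15]; out-of-vocabulary tokens map to
# "<unk>" (id 20), which is exactly what test_full_tokenizer asserts.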
| 708
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
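# A minimal programmatic invocation sketch for the converter above (the paths
# are illustrative placeholders, not real checkpoints):
#
#   convert_unispeech_checkpoint(
#       checkpoint_path="unispeech_large.pt",
#       pytorch_dump_folder_path="./unispeech-converted",
#       is_finetuned=False,
#   )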
| 128
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
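# A quick sketch of the state-dict surgery the converter above performs (the
# keys are the OLD_KEY/NEW_KEY constants defined in this file):
#
#   d = {"lm_head.decoder.weight": tensor}
#   d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
#   # -> {"lm_head.weight": tensor}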
| 414
|
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 414
| 1
|
def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"""{negative}0b{binary_recursive(int(number))}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 232
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """simple docstring"""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
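# A worked example for get_week_day above (the date is chosen for illustration):
# for 2022-08-13, century_anchor = 2, dooms_day = 1, day_anchor =
# DOOMSDAY_NOT_LEAP[7] = 1, so (1 + 13 - 1) % 7 = 6 -> "Saturday".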
| 232
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_SCREAMING_SNAKE_CASE = "\\n\n"
_SCREAMING_SNAKE_CASE = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
_KWARGS_DESCRIPTION = _SCREAMING_SNAKE_CASE  # the args/examples docstring assigned directly above


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 18
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 164
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config ( PretrainedConfig ):
'''simple docstring'''
    model_type = "mobilenet_v1"
    def __init__(self, num_channels=3, image_size=2_2_4, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig ( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
    def outputs( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
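# A minimal construction sketch for the config above (argument values are the
# defaults from __init__, repeated here for illustration):
#
#   config = MobileNetV1Config(num_channels=3, image_size=224, depth_multiplier=1.0)
#   assert config.model_type == "mobilenet_v1"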
| 70
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: str = "relu", **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        """simple docstring"""
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )

    def call(self, pixel_values):
        """simple docstring"""
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        """simple docstring"""
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        """simple docstring"""
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        """simple docstring"""
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        """simple docstring"""
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        """simple docstring"""
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer ( tf.keras.layers.Layer ):
'''simple docstring'''
    config_class = RegNetConfig
    def __init__(self, config, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel ( TFPreTrainedModel ):
'''simple docstring'''
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature( self ):
        """simple docstring"""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4), dtype=tf.float32)}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.", REGNET_START_DOCSTRING, )
class TFRegNetModel ( TFRegNetPreTrainedModel ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        """simple docstring"""
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """, REGNET_START_DOCSTRING, )
class TFRegNetForImageClassification ( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
'''simple docstring'''
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        """simple docstring"""
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False, ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
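# A minimal usage sketch for the classification model above (the checkpoint id
# is the one referenced in this file's docstring constants; `image` is an
# illustrative PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1))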
| 70
| 1
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> Optional[int]:
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None) -> List[Any]:
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs) -> str:
        '''simple docstring'''
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None) -> Dict:
        '''simple docstring'''
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None) -> Optional[int]:
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs) -> str:
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
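# A minimal usage sketch for the pipeline above (the model id and image URL are
# illustrative examples from the transformers docs):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")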
| 653
|
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 1_2345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
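# A quick sanity check for check_partition_perfect above (worked out from the
# formula: 4 * 12 + 1 = 49, sqrt(49) / 2 + 1 / 2 = 4 = 2 ** 2):
#   check_partition_perfect(12) -> True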
| 653
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main( ):
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
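# --- Editor's usage sketch (not part of the original module) --------------
# Assuming the parser above is wired into the `accelerate` CLI as the
# `tpu-config` subcommand, the flags it defines would be used roughly like:
#   accelerate tpu-config --command "pip install -e ." \
#       --tpu_name my-tpu --tpu_zone us-central1-a --debug
# With --debug, the assembled gcloud command is only printed, e.g.:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; pip install -e ." --worker all
# (TPU name and zone here are placeholders.)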
| 452
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1)
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none( self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({'''image_embeds''': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
    def test_attention_slicing_forward_pass( self):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical( self):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe(input_image , '''anime turtle''' , generator=generator , output_type='''np''')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image)
    def test_stable_unclip_h_img2img( self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe(input_image , '''anime turtle''' , generator=generator , output_type='''np''')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
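# --- Editor's sketch (hypothetical helper, not part of the test suite) ----
# The memory test above follows a reusable pattern: reset the CUDA peak
# counters, run the pipeline once, then read the peak allocation.
def _peak_gpu_memory_gb(fn):
    """Run `fn` and return the peak CUDA memory it allocated, in GB."""
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated() / 10**9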
| 452
| 1
|
'''simple docstring'''
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
A_ : Optional[int] = logging.getLogger(__name__)
class NERTransformer( BaseTransformer ):
    '''simple docstring'''
    mode = '''token-classification'''
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("""tasks""" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_num ):
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self ):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle = False ):
        cached_features_file = self._feature_file(mode )
        logger.info("""Loading features from cached file %s""" , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            all_token_type_ids = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_label_ids ) , batch_size=batch_size )
    def validation_step( self , batch , batch_nb ):
        """Compute validation"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ):
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        preds = np.argmax(preds , axis=2 )
        out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        label_map = dict(enumerate(self.labels ) )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        results = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(out_label_list , preds_list ),
            """precision""": precision_score(out_label_list , preds_list ),
            """recall""": recall_score(out_label_list , preds_list ),
            """f1""": f1_score(out_label_list , preds_list ),
        }
        ret = dict(results.items() )
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ):
        # when stable
        ret , preds , targets = self._eval_end(outputs )
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ):
        # updating to test_epoch_end instead of deprecated test_end
        ret , predictions , targets = self._eval_end(outputs )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=str , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" , default=1_2_8 , type=int , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" , default="""""" , type=str , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=int , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
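# --- Editor's usage sketch (script name and paths are placeholders) -------
# The Lightning module above is driven by `generic_train`; a typical launch:
#   python run_ner_pl.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out \
#       --max_seq_length 128 --task_type NER --do_train --do_predict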
| 38
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=3_2 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=3_2 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
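# --- Editor's note: stage-name derivation used by the config above --------
# With the default depths [3, 4, 6, 3] there are four stages plus the stem:
#   ["stem"] + [f"stage{i}" for i in range(1, 4 + 1)]
#   -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']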
| 38
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
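# --- Editor's sketch: how the helpers above are used (made-up data) -------
def _example_color_quantize():
    # An 8x8 RGB image quantized against 4 hypothetical palette colors; the
    # result assigns each of the 64 flattened pixels the index of its
    # nearest color under squared euclidean distance.
    img = np.random.rand(8 , 8 , 3)
    clusters = np.random.rand(4 , 3)
    ids = color_quantize(img , clusters)  # shape (64,), values in 0..3
    return ids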
class ImageGPTImageProcessor( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self ,clusters = None ,do_resize = True ,size = None ,resample = PILImageResampling.BILINEAR ,do_normalize = True ,do_color_quantize = True ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self ,image ,size ,resample = PILImageResampling.BILINEAR ,data_format = None ,**kwargs ,) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
        return resize(
            image ,size=(size["height"], size["width"]) ,resample=resample ,data_format=data_format ,**kwargs )
    def normalize( self ,image ,data_format = None ,) -> np.ndarray:
        image = rescale(image=image ,scale=1 / 127.5 ,data_format=data_format )
        image = image - 1
        return image
    def preprocess( self ,images ,do_resize = None ,size = None ,resample = None ,do_normalize = None ,do_color_quantize = None ,clusters = None ,return_tensors = None ,data_format = ChannelDimension.FIRST ,**kwargs ,) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image ,ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images ,clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size ,-1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
| 57
|
def solution( n : int = 10_00 ) -> int:
    '''Return the sum of all natural numbers below n that are multiples of 3 or 5.'''
    a :int = 3
    result :int = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # a multiple of 15 is also a multiple of 3, so the `or` check
            # above already counts it exactly once; no extra branch is needed.
            result += a
        a += 1
    return result
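# --- Editor's sketch: an O(1) closed form for the same sum ----------------
# By inclusion-exclusion, the sum of multiples of 3 or 5 below n equals
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) // 2 and m = (n - 1) // k.
# For n = 1000 both approaches give 233168.
def _solution_closed_form( n : int = 10_00 ) -> int:
    def s(k : int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return s(3) + s(5) - s(15)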
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
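        # Editor's worked example: with the defaults above (image_size=30,
        # patch_size=2), (30 // 2) ** 2 = 225 patches, so seq_length = 226.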
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViT does not use inputs_embeds""")
    def test_inputs_embeds( self):
        pass
    def test_model_common_attributes( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""") if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self):
        model = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
    def test_inference_interpolate_pos_encoding( self):
        model = ViTModel.from_pretrained("""facebook/dino-vits8""").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480)
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self):
        model = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.float16 , device_map="""auto""")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 88
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
lowerCamelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
lowerCamelCase : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
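# --- Editor's note on the defaults above -----------------------------------
# `begin_suppress_tokens` defaults to [220, 50256]: token 220 is the plain
# space token and 50256 is the end-of-text token, both suppressed at the
# first decoding step. A toy config could be built roughly like
#   config = WhisperConfig(vocab_size=1_000, d_model=64)  # hypothetical sizes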
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs(self ):
'''simple docstring'''
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
    def generate_dummy_inputs(self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , sampling_rate : int = 22050 , time_duration : float = 5.0 , frequency : int = 220 , ):
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''' )
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs
    @property
    def atol_for_validation(self ):
        '''simple docstring'''
        return 1E-3
| 460
| 0
|
from __future__ import annotations
from math import pow, sqrt
def a_ (resistance : float , reactance : float , impedance : float )-> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def a_ (root : TreeNode | None )-> bool:
    # Validation
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(root ):
        raise ValueError(
            """Each node should be type of TreeNode and data should be float.""" )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(root , -float("""inf""" ) , float("""inf""" ) )
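# --- Editor's worked example -----------------------------------------------
# A three-node tree with 2.0 at the root satisfies the BST property, while
# swapping the children violates it:
#   a_(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))  ->  True
#   a_(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))  ->  False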
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164
| 1
|
class Node :
    def __init__( self , name , val) -> None:
        self.name = name
        self.val = val
    def __str__( self) -> str:
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""
    def __lt__( self , other) -> bool:
        return self.val < other.val
class MinHeap :
    def __init__( self , array) -> None:
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)
    def __getitem__( self , key):
        return self.get_value(key)
    def get_parent_idx( self , idx):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx):
        return idx * 2 + 1
    def get_right_child_idx( self , idx):
        return idx * 2 + 2
    def get_value( self , key):
        return self.heap_dict[key]
    def build_heap( self , array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1):
            self.sift_down(i , array)
        return array
    def sift_down( self , idx , array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek( self):
        return self.heap[0]
    def remove( self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap)
        return x
    def insert( self , node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)
    def is_empty( self):
        return len(self.heap) == 0
    def decrease_key( self , node , new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
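# --- Editor's sketch: draining the heap as a priority queue ----------------
# `remove` always pops the current minimum, so this loop would print the
# nodes in ascending order of val (B first, after the decrease_key above):
#   while not my_min_heap.is_empty():
#       print(my_min_heap.remove())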
| 20
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig):
    """simple docstring"""
    model_type = "bert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig):
    """simple docstring"""
    @property
    def inputs( self ):
if self.task == "multiple-choice":
__snake_case : Any = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
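# --- Editor's note on the ONNX axes above ----------------------------------
# For a standard (non multiple-choice) task each input is exported with
# dynamic axes {0: "batch", 1: "sequence"}, so e.g. input_ids of shape
# (batch, seq_len) stays fully dynamic in both dimensions.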
| 576
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class _snake_case ( unittest.TestCase ):
@slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config , BertConfig)
            model = TFAutoModel.from_pretrained(model_name , from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model , TFBertModel)
            model = AutoModel.from_pretrained(model_name , from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model , BertModel)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : int = AutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
lowercase__ : str = AutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
lowercase__ : str = TFAutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ : Dict = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : str = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Any = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
@slow
def lowercase__ ( self):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_) , 1_44_10)
lowercase__ : Optional[int] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_) , 1_44_10)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Dict = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_pt=SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_) , 1_44_10)
lowercase__ : List[str] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE_ , from_tf=SCREAMING_SNAKE_CASE_)
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE_) , 1_44_10)
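# Stand-alone sketch of the PT<->TF round trip exercised above ("bert-base-uncased"
# is only an example checkpoint; any repo with weights for both frameworks works):
#
#   from transformers import AutoModel, TFAutoModel
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#   pt_model = AutoModel.from_pretrained("bert-base-uncased")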
| 718
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
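# Downstream sketch of the same guard pattern: because the heavy pipelines above
# are only exported when their dependencies are installed, user code often probes
# for them defensively (illustrative, not part of the package itself):
#
#   try:
#       from diffusers import StableDiffusionPipeline
#   except ImportError:
#       StableDiffusionPipeline = None  # torch/transformers missing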
| 495
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
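# Illustrative invocation of the option wired up above (the report id is an
# arbitrary example name):
#
#   python -m pytest tests -k "not slow" --make-reports=my_test_run
#
# pytest_terminal_summary_main then writes the per-run report files.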
| 247
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
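# Illustrative usage sketch ("bert-base-uncased" stands in for any BERT-style
# checkpoint whose vocabulary the fast tokenizer can load):
#
#   tok = CustomTokenizerFast.from_pretrained("bert-base-uncased")
#   print(tok("hello world")["input_ids"])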
| 247
| 1
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('bert-base-cased' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs)
| 719
|
import os
from pathlib import Path
def load_cuda_kernels():
    """simple docstring"""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
        ]
    ]

    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ] , )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
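# Guarded-load sketch: building the extension requires a CUDA toolchain, so
# callers usually catch failures and fall back to a pure-PyTorch path.
def try_load_cuda_kernels():
    try:
        return load_cuda_kernels()
    except Exception:
        return None  # caller falls back to the native PyTorch implementation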
| 433
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 5_12}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self, text):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (out_vocab_file,)
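# Illustrative round trip, assuming a local SentencePiece model at the
# hypothetical path "spiece.model":
#
#   tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#   ids = tokenizer("Life is like a box of chocolates.")["input_ids"]
#   print(tokenizer.decode(ids))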
| 23
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )

                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )

                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )

                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 675
| 0
|
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    print(rayleigh_quotient(a , v ) )

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), F'{a} is not hermitian.'
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
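# Sanity-check sketch: for a Hermitian matrix the Rayleigh quotient must lie
# between the smallest and largest eigenvalues.
def _check_rayleigh_bounds() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    r = rayleigh_quotient(a, v).item().real
    eigs = np.linalg.eigvalsh(a)  # real eigenvalues of a Hermitian matrix
    assert eigs.min() - 1e-9 <= r <= eigs.max() + 1e-9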
| 542
| 0
|
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array , key=lambda x : x[column] )


def dis_between_closest_pair(points, points_counts, min_dis=float("""inf""" )):
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("""inf""" )):
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
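# Cross-check sketch: compare the divide-and-conquer answer with a brute-force
# O(n^2) scan on a small random instance (printed side by side rather than
# asserted, since both are floating-point).
import random

def _brute_force_closest_pair(pts):
    return min(
        euclidean_distance_sqr(pts[i], pts[j]) ** 0.5
        for i in range(len(pts) - 1)
        for j in range(i + 1, len(pts))
    )

def _check_closest_pair(n: int = 50) -> None:
    pts = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(n)]
    print(closest_pair_of_points(pts, len(pts)), _brute_force_closest_pair(pts))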
| 480
|
"""simple docstring"""
def solution(n: int = 2_0_0_0_0_0_0) -> int:
    """simple docstring"""
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'''{solution() = }''')
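# Sanity-check sketch: for small n the sieve should agree with naive trial
# division (`_is_prime_naive` is a helper introduced only for this check).
def _is_prime_naive(k: int) -> bool:
    if k < 2:
        return False
    return all(k % d for d in range(2, int(k**0.5) + 1))

def _check_solution_small() -> None:
    for n in (10, 100, 1_000):
        assert solution(n) == sum(k for k in range(n) if _is_prime_naive(k))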
| 480
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P' ):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors='pt' if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch['entity_ids'] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]

        ner_tags = [feature['ner_tags'] for feature in features]
        batch['ner_tags'] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['original_entity_spans'] for feature in features]
        batch['original_entity_spans'] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}

        return batch
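# Illustrative behaviour of `padding_tensor`: ragged rows are padded out to
# `sequence_length`, on the side the tokenizer pads on.
def _demo_padding_tensor() -> None:
    rows = [[1, 2], [3]]
    print(padding_tensor(rows, -1, "right", 4))
    # -> [[1, 2, -1, -1], [3, -1, -1, -1]]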
| 309
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise


def pre_order(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=',' )
    pre_order(node.left )
    pre_order(node.right )


def in_order(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=',' )
    in_order(node.right )


def post_order(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=',' )


def level_order(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )


def level_order_actual(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )


def pre_order_iter(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right


def post_order_iter(node):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )


def prompt(s: str = "", width=5_0, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
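# Illustrative non-interactive usage (build_tree above reads stdin): a tiny
# hand-built tree run through two of the traversals.
def _demo_traversals() -> None:
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    pre_order(root)   # prints 1,2,4,3,
    print()
    in_order(root)    # prints 4,2,1,3,
    print()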
| 309
| 1
|
from __future__ import annotations
from math import pi
def ind_reactance( inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
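# Illustrative usage: exactly one argument must be 0, and that quantity is
# solved for. A 35 mH inductor at 1 kHz gives 2*pi*1000*0.035 ≈ 219.9 ohm.
def _demo_ind_reactance() -> None:
    print(ind_reactance(35e-3, 1_000, 0))   # {'reactance': 219.91...}
    print(ind_reactance(0, 1_000, 219.9))   # solves for the inductance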
| 62
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/data2vec-vision-base-ft""": (
        """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self):
        return 1e-4
| 259
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = """file"""


@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH


@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 552
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=1_8,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )

        image = Image.open(ds[0]["file"] ).convert("RGB" )

        encoding = image_processing(image , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
__lowerCAmelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
__lowerCAmelCase = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 
5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _A )
self.assertListEqual(encoding.boxes , _A )
# with apply_OCR = False
__lowerCAmelCase = LayoutLMvaImageProcessor(apply_ocr=_A )
__lowerCAmelCase = image_processing(_A , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
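# Added usage sketch (not part of the test class above): in application code the
# processor under test corresponds to `transformers.LayoutLMv3ImageProcessor`;
# with `apply_ocr=True` it returns Tesseract words and boxes alongside the pixel
# values. This assumes a transformers version that ships that class and that
# Tesseract is installed on the machine.
def _layoutlmv3_processor_example(image):
    from transformers import LayoutLMv3ImageProcessor

    processor = LayoutLMv3ImageProcessor(apply_ocr=True)
    encoding = processor(image, return_tensors="pt")
    # pixel_values has shape (batch, channels, 224, 224); words/boxes come from OCR.
    return encoding.pixel_values.shape, encoding.words, encoding.boxes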
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date via the Doomsday rule."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # a centurial year is a leap year only when divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
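# Added usage sketch for the restored `get_week_day` above; both expected
# values were checked by hand against a calendar.
def _doomsday_example() -> None:
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2024, 1, 1) == "Monday"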
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Zeller's congruence: find the week day for a mm-dd-yyyy date string."""
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
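# Added usage sketch (the module is normally CLI-driven); wrapped in a function
# so importing it stays side-effect free. 01-31-2010 was indeed a Sunday.
def _zeller_example() -> None:
    assert zeller("01-31-2010") == "Your date 01-31-2010, is a Sunday!"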
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector with the direction and length of the segment end_point1 -> end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product of ab and ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True when every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
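# Added usage sketch for the restored collinearity helpers above.
def _collinearity_example() -> None:
    assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
    assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))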
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any=False ) -> str:
_a : Union[str, Any] =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a : Optional[int] =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Tuple=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
_a : str =""""""
else:
_a : Tuple ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a : Optional[int] =state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
_a : Any =state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_a : Any =in_proj_weight[
: config.hidden_size, :
]
_a : Dict =in_proj_bias[: config.hidden_size]
_a : Dict =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a : Optional[Any] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a : Union[str, Any] =in_proj_weight[
-config.hidden_size :, :
]
_a : List[str] =in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Tuple ) -> Optional[int]:
_a : Any =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> Tuple:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
_a : str =[
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Dict ) -> int:
_a : str =dct.pop(_UpperCAmelCase )
_a : Any =val
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any ) -> str:
_a : List[Any] =ViTMSNConfig()
_a : Optional[int] =1000
_a : Union[str, Any] ="""datasets/huggingface/label-files"""
_a : Any ="""imagenet-1k-id2label.json"""
_a : Optional[int] =json.load(open(hf_hub_download(_UpperCAmelCase ,_UpperCAmelCase ) ,"""r""" ) )
    _a : int ={int(k): v for k, v in idalabel.items()}
_a : Optional[Any] =idalabel
_a : Dict ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
_a : Tuple =384
_a : Optional[int] =1536
_a : Optional[int] =6
elif "l16" in checkpoint_url:
_a : int =1024
_a : int =4096
_a : List[str] =24
_a : Union[str, Any] =16
_a : Any =0.1
elif "b4" in checkpoint_url:
_a : Optional[int] =4
elif "l7" in checkpoint_url:
_a : Optional[int] =7
_a : Union[str, Any] =1024
_a : Dict =4096
_a : List[str] =24
_a : Any =16
_a : Dict =0.1
_a : Any =ViTMSNModel(_UpperCAmelCase )
_a : Union[str, Any] =torch.hub.load_state_dict_from_url(_UpperCAmelCase ,map_location="""cpu""" )["""target_encoder"""]
_a : Union[str, Any] =ViTImageProcessor(size=config.image_size )
remove_projection_head(_UpperCAmelCase )
_a : List[str] =create_rename_keys(_UpperCAmelCase ,base_model=_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase ,_UpperCAmelCase ,base_model=_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
model.eval()
_a : Union[str, Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
_a : str =Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw )
_a : Union[str, Any] =ViTImageProcessor(
size=config.image_size ,image_mean=_UpperCAmelCase ,image_std=_UpperCAmelCase )
_a : Tuple =image_processor(images=_UpperCAmelCase ,return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
_a : str =model(**_UpperCAmelCase )
_a : Union[str, Any] =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_a : Tuple =torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
_a : Optional[int] =torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
_a : str =torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
_a : List[str] =torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
_a : Optional[int] =torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] ,_UpperCAmelCase ,atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
A__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
A__: Union[str, Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _a ( lowerCAmelCase_ ):
a_ : List[str] = 'convbert'
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Dict=3_05_22 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Any=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-12 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : int=7_68 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[Any]=9 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : str , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = embedding_size
lowerCamelCase__ = head_ratio
lowerCamelCase__ = conv_kernel_size
lowerCamelCase__ = num_groups
lowerCamelCase__ = classifier_dropout
class _a ( lowerCAmelCase_ ):
@property
def _UpperCamelCase ( self : List[Any] ):
if self.task == "multiple-choice":
lowerCamelCase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to use it as a min- or max-heap."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the scores of the two stored items."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns the index that should be the parent among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top [item, score] pair from the heap and removes it."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """Kept as a stub from the original file; see the usage sketch below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
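# Added usage sketch: the restored `Heap` keeps the largest keyed score on top;
# pass `key=lambda x: -x` to get min-heap behaviour instead.
def _heap_example() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]
    assert h.extract_top() == [7, 37]
    assert h.get_top() == [5, 34]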
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()
    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
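# Example invocation (added; the script and output file names are illustrative):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions --gold_data_path eval.gold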
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = XLMTokenizer
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : List[str] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = 'lower newer'
lowerCAmelCase__ : Any = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase__ : Optional[int] = 'lower'
lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Tuple = tokens + ['<unk>']
lowerCAmelCase__ : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
lowerCAmelCase__ : Any = tokenizer.encode('sequence builders' , add_special_tokens=a )
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(a )
lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'spiece.model'}
snake_case_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
snake_case_ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
snake_case_ = '▁'
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : str = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self : List[str] , a__ : Tuple , a__ : Tuple=True , a__ : Dict=True , a__ : Dict=False , a__ : Optional[Any]="[CLS]" , a__ : Union[str, Any]="[SEP]" , a__ : Optional[int]="<unk>" , a__ : Optional[int]="[SEP]" , a__ : Union[str, Any]="<pad>" , a__ : int="[CLS]" , a__ : List[str]="[MASK]" , a__ : Optional[Dict[str, Any]] = None , **a__ : Dict , ):
"""simple docstring"""
__snake_case = (
AddedToken(a__ , lstrip=a__ , rstrip=a__ , normalized=a__ )
if isinstance(a__ , a__ )
else mask_token
)
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def a (self : Any ):
"""simple docstring"""
return len(self.sp_model )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__(self : List[Any] , a__ : Any ):
"""simple docstring"""
__snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a (self : Union[str, Any] , a__ : Tuple ):
"""simple docstring"""
if self.remove_space:
__snake_case = ''' '''.join(inputs.strip().split() )
else:
__snake_case = inputs
__snake_case = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__snake_case = unicodedata.normalize('''NFKD''' , a__ )
__snake_case = ''''''.join([c for c in outputs if not unicodedata.combining(a__ )] )
if self.do_lower_case:
__snake_case = outputs.lower()
return outputs
def a (self : Optional[int] , a__ : str ):
"""simple docstring"""
__snake_case = self.preprocess_text(a__ )
__snake_case = self.sp_model.encode(a__ , out_type=a__ )
__snake_case = []
for piece in pieces:
if len(a__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(a__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__snake_case = cur_pieces[1:]
else:
__snake_case = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a__ )
else:
new_pieces.append(a__ )
return new_pieces
def a (self : Optional[Any] , a__ : Tuple ):
"""simple docstring"""
return self.sp_model.PieceToId(a__ )
def a (self : str , a__ : str ):
"""simple docstring"""
return self.sp_model.IdToPiece(a__ )
def a (self : Any , a__ : List[str] ):
"""simple docstring"""
__snake_case = []
__snake_case = ''''''
__snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
__snake_case = True
__snake_case = []
else:
current_sub_tokens.append(a__ )
__snake_case = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def a (self : Dict , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a (self : List[Any] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1]
def a (self : Any , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a (self : List[Any] , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , '''wb''' ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
def __get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Returns the list of undirected bridges in the given adjacency-list graph."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
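# Added usage sketch on demo graph 0 above: removing any listed edge
# disconnects the graph, so exactly these three edges are bridges.
def _bridges_example() -> None:
    assert compute_bridges(__get_demo_graph(0)) == [(3, 4), (2, 3), (2, 5)]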
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''pegasus'''
snake_case__ = ['''past_key_values''']
snake_case__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , __UpperCamelCase : str=5_0265 , __UpperCamelCase : Optional[Any]=1024 , __UpperCamelCase : int=12 , __UpperCamelCase : List[str]=4096 , __UpperCamelCase : str=16 , __UpperCamelCase : int=12 , __UpperCamelCase : Union[str, Any]=4096 , __UpperCamelCase : List[Any]=16 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Dict=True , __UpperCamelCase : str="gelu" , __UpperCamelCase : int=1024 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : List[str]=1 , **__UpperCamelCase : Union[str, Any] , ) -> Union[str, Any]:
_UpperCamelCase = vocab_size
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = encoder_layers
_UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
@property
def _UpperCamelCase ( self : Any ) -> int:
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self : List[Any] ) -> int:
return self.d_model
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
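# Added usage sketch; the version pins below are placeholders, not real
# requirements of this module.
def _require_version_example() -> None:
    require_version("numpy")            # only checks that numpy is installed
    require_version("tqdm>=4.27,<100")  # checks an installed version range
    require_version_core("python>=3.8.0")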
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Dict = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Optional[int] = AgentAudio(lowerCamelCase__ )
_UpperCamelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,atol=1E-4 ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Any = get_new_path(suffix='.wav' )
sf.write(lowerCamelCase__ ,lowerCamelCase__ ,16000 )
_UpperCamelCase : List[Any] = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
self.assertEqual(agent_type.to_string() ,lowerCamelCase__ )
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = torch.randint(0 ,256 ,(64, 64, 3) )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
_UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type._tensor ,atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Tuple = Image.open(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Union[str, Any] = Image.open(lowerCamelCase__ )
_UpperCamelCase : List[Any] = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[Any] = 'Hey!'
_UpperCamelCase : Optional[int] = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,agent_type.to_string() )
self.assertEqual(lowerCamelCase__ ,agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _a ( unittest.TestCase ):
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowercase ) as mock_head:
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowercase ) as mock_head:
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def A ( self : int ):
'''simple docstring'''
try:
UpperCAmelCase = tempfile.mktemp()
with open(lowercase , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowercase )
UpperCAmelCase = AlbertTokenizer.from_pretrained(lowercase )
finally:
os.remove(lowercase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowercase )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class _a ( unittest.TestCase ):
__a : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def A ( cls : int ):
'''simple docstring'''
UpperCAmelCase = TOKEN
HfFolder.save_token(lowercase )
@classmethod
def A ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def A ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowercase , '''vocab.txt''' )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowercase )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase , repo_id='''test-tokenizer''' , push_to_hub=lowercase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def A ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowercase , '''vocab.txt''' )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowercase )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowercase , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowercase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def A ( self : Optional[Any] ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowercase , '''vocab.txt''' )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = CustomTokenizer(lowercase )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowercase , '''vocab.txt''' )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowercase )
bert_tokenizer.save_pretrained(lowercase )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowercase )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer" , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
UpperCAmelCase = AutoTokenizer.from_pretrained(
f"{USER}/test-dynamic-tokenizer" , use_fast=lowercase , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class _a ( unittest.TestCase ):
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = Trie()
UpperCAmelCase = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowercase , ['''AB''', '''C'''] )
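# For reference, a minimal sketch of the trie these tests exercise. It is a simplified
# greedy longest-match reimplementation (not the transformers.Trie source), but it
# reproduces the `data` layout and the `split` results asserted above.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word: str) -> None:
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # the empty-string key marks a complete token

    def split(self, text: str) -> list:
        out, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j  # longest complete token ending here
            if end is None:
                i += 1
            else:
                if i > start:
                    out.append(text[start:i])
                out.append(text[i:end])
                start = i = end
        if start < len(text):
            out.append(text[start:])
        return out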
| 703
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
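# Usage sketch (illustrative only; all kwargs have the defaults shown above):
#   config = MgpstrConfig(max_token_length=27, num_character_labels=38)
#   config.model_type  # -> "mgp-str"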
| 358
| 0
|
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sorts `collection` in place with patience sort and returns it."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
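# Quick deterministic check (illustrative), avoiding the stdin prompt below:
assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]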
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 38
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 337
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]


if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
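# Illustrative effect of the lazy module above: importing the package is cheap, and a
# name listed in `_import_structure` is only resolved from its submodule on first access:
#   from transformers.models.mobilenet_v2 import MobileNetV2Config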
| 705
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Returns 1 only if both inputs are 1, else 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively checks the truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
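# The same tuple-counting one-liner extends to the other basic gates; for example,
# an or_gate (illustrative, not part of the original module) counts ones instead:
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) > 0)


assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1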
| 183
| 0
|
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_8_0_0_0,
"""sample_size""": 6_5_5_3_6,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_8_0_0_0,
"""sample_size""": 6_5_5_3_6,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_8_0_0_0,
"""sample_size""": 1_3_1_0_7_2,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_6_0_0_0,
"""sample_size""": 6_5_5_3_6,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_6_0_0_0,
"""sample_size""": 6_5_5_3_6,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_6_0_0_0,
"""sample_size""": 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
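# Sanity check (illustrative): t = 0 and t = 1 are fixed points of the crash schedule,
# since t=0 gives (alpha, sigma) = (1, 0) -> atan2(0, 1) = 0 and t=1 gives (0, 1) -> pi/2.
assert torch.allclose(get_crash_schedule(torch.tensor([0.0, 1.0])), torch.tensor([0.0, 1.0]))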
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UP_NUM_TO_LAYER = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
MID_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
DEPTH_0_TO_LAYER = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
RES_CONV_MAP = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
ATTN_MAP = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
main(args)
| 102
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 102
| 1
|
def sum_digits(num):
    """Returns the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n=100):
    """Sum of digits of the numerator of the `max_n`-th convergent of e."""
    # the continued-fraction terms of e are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 450
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter; used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
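# Usage sketch (illustrative): wrap a frozen linear layer with a trainable low-rank path.
#   base = nn.Linear(64, 64)
#   lora = LoRALayer(base, rank=16)
#   out = lora(torch.randn(2, 64))  # == base(x) + adapter(x)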
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
'''simple docstring'''
    def setUp(self):
        super().setUp()

        # Models and tokenizer (`model_fpaa` / `model_abit` keep the names used by the tests below)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
def _UpperCAmelCase ( self : Tuple ) -> int:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.model_abit.config
self.assertTrue(hasattr(a , """quantization_config""" ) )
SCREAMING_SNAKE_CASE = config.to_dict()
SCREAMING_SNAKE_CASE = config.to_diff_dict()
SCREAMING_SNAKE_CASE = config.to_json_string()
def _UpperCAmelCase ( self : Any ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
SCREAMING_SNAKE_CASE = self.model_fpaa.get_memory_footprint()
SCREAMING_SNAKE_CASE = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
SCREAMING_SNAKE_CASE = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def _UpperCAmelCase ( self : Tuple ) -> List[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def _UpperCAmelCase ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a ) , self.EXPECTED_OUTPUTS )
def _UpperCAmelCase ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , device_map="""auto""" )
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = model_abit_from_config.generate(
input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a ) , self.EXPECTED_OUTPUTS )
def _UpperCAmelCase ( self : str ) -> Optional[int]:
with self.assertRaises(a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a )
def _UpperCAmelCase ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE = BitsAndBytesConfig()
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a , load_in_abit=a , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
def _UpperCAmelCase ( self : Optional[Any] ) -> int:
with self.assertRaises(a ):
# Tries with `str`
self.model_abit.to("""cpu""" )
with self.assertRaises(a ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.to(torch.device("""cuda:0""" ) )
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = self.model_fpaa.to(torch.floataa )
SCREAMING_SNAKE_CASE = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.to("""cpu""" )
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.half()
# Check this does not throw an error
SCREAMING_SNAKE_CASE = self.model_fpaa.float()
def _UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=a , device_map="""auto""" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
'''simple docstring'''
@classmethod
def _UpperCAmelCase ( cls : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE = """t5-small"""
SCREAMING_SNAKE_CASE = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(cls.model_name )
SCREAMING_SNAKE_CASE = """Translate in German: Hello, my dog is cute"""
def _UpperCAmelCase ( self : Any ) -> List[str]:
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : List[str] ) -> int:
from transformers import TaForConditionalGeneration
SCREAMING_SNAKE_CASE = TaForConditionalGeneration._keep_in_fpaa_modules
SCREAMING_SNAKE_CASE = None
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map="""auto""" )
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
SCREAMING_SNAKE_CASE = model.generate(**a )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map="""auto""" )
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
SCREAMING_SNAKE_CASE = model.generate(**a )
SCREAMING_SNAKE_CASE = modules
def _UpperCAmelCase ( self : int ) -> int:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a , device_map="""auto""" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
SCREAMING_SNAKE_CASE = model.generate(**a )
# test with `flan-t5-small`
SCREAMING_SNAKE_CASE = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a , device_map="""auto""" )
SCREAMING_SNAKE_CASE = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
SCREAMING_SNAKE_CASE = model.generate(**a )
class Classes4BitModelTest(Base4bitTest):
'''simple docstring'''
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
super().setUp()
# model_name
SCREAMING_SNAKE_CASE = """bigscience/bloom-560m"""
SCREAMING_SNAKE_CASE = """t5-small"""
# Different types of model
SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.model_name , load_in_abit=a , device_map="""auto""" )
# Sequence classification model
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a , device_map="""auto""" )
# CausalLM model
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a , device_map="""auto""" )
# Seq2seq model
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a , device_map="""auto""" )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Dict ) -> List[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
'''simple docstring'''
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
'''simple docstring'''
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
'''simple docstring'''
def _UpperCAmelCase ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
super().setUp()
def _UpperCAmelCase ( self : Any ) -> Tuple:
if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
return
# Step 1: freeze all parameters
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
SCREAMING_SNAKE_CASE = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
SCREAMING_SNAKE_CASE = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a ) ):
SCREAMING_SNAKE_CASE = LoRALayer(module.q_proj , rank=16 )
SCREAMING_SNAKE_CASE = LoRALayer(module.k_proj , rank=16 )
SCREAMING_SNAKE_CASE = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
SCREAMING_SNAKE_CASE = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
SCREAMING_SNAKE_CASE = model.forward(**a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(a , a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 450
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
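# Example (illustrative): a ParlAI encoder attention key maps onto the HF name as
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"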
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 389
|
def pancake_sort(arr):
    """Sorts `arr` using only prefix reversals (flips)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
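# Worked example (illustrative): [3, 1, 2] -> the max (3) is already in front, so the
# first flip is a no-op; flipping the first three gives [2, 1, 3]; flipping [2, 1] sorts it.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]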
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 176
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 265
|
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """A wrapper around a learning-rate scheduler that only steps when the optimizer(s) actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
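# Usage sketch (illustrative). In practice the optimizers passed in are accelerate's
# AcceleratedOptimizer wrappers, which expose `step_was_skipped`; construction only:
#   sched = AcceleratedScheduler(torch_scheduler, accelerated_optimizer, step_with_optimizer=True)
#   sched.step()  # steps the wrapped scheduler only when gradients were actually synced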
| 265
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Returns the letters of `word`, sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Returns every known word with the same signature as `my_word`."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
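# Example (illustrative; actual output depends on the contents of words.txt):
#   signature("post") == "opst"
#   anagram("post") -> e.g. ["opts", "post", "pots", "spot", "stop", "tops"]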
| 168
|
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
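# Example (illustrative): a config docstring containing
#   [bert-base-uncased](https://huggingface.co/bert-base-uncased)
# yields "bert-base-uncased", while a name/link mismatch yields None.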
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 168
| 1
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
UpperCAmelCase = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
UpperCAmelCase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
UpperCAmelCase = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
UpperCAmelCase = '''allenai'''
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
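# Mirrors the example in the comment above (the special tokens must be present in the input):
#   rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#   -> {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}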
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# prep
assert os.path.exists(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowercase = basename(__SCREAMING_SNAKE_CASE )
lowercase = dirname(__SCREAMING_SNAKE_CASE )
lowercase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase = cls.hub_models()
lowercase = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowercase = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
lowercase = hub_utils.from_pretrained(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , archive_map=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase = vars(chkpt['args']['model'] )
lowercase = args['source_lang']
lowercase = args['target_lang']
lowercase = dirname(__SCREAMING_SNAKE_CASE )
lowercase = basename(__SCREAMING_SNAKE_CASE )
# dicts
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , F'''dict.{src_lang}.txt''' )
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , F'''dict.{tgt_lang}.txt''' )
lowercase = Dictionary.load(__SCREAMING_SNAKE_CASE )
lowercase = rewrite_dict_keys(src_dict.indices )
lowercase = len(__SCREAMING_SNAKE_CASE )
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , 'vocab-src.json' )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE , indent=__SCREAMING_SNAKE_CASE ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase = True
for k in src_vocab.keys():
if not k.islower():
lowercase = False
break
lowercase = Dictionary.load(__SCREAMING_SNAKE_CASE )
lowercase = rewrite_dict_keys(tgt_dict.indices )
lowercase = len(__SCREAMING_SNAKE_CASE )
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , 'vocab-tgt.json' )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE , indent=__SCREAMING_SNAKE_CASE ) )
# merges_file (bpecodes)
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ):
break
with open(__SCREAMING_SNAKE_CASE , encoding='utf-8' ) as fin:
lowercase = fin.read()
lowercase = re.sub(r' \d+$' , '' , __SCREAMING_SNAKE_CASE , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as fout:
fout.write(__SCREAMING_SNAKE_CASE )

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, "tokenizer_config.json")

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, "pytorch_model.bin")
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
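
# Minimal usage sketch (illustrative; the script filename and paths below are
# assumptions, not taken from this file):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path fsmt_dump/model4.pt \
#       --pytorch_dump_folder_path converted/wmt19-ru-en
#
# The checkpoint file is expected to live alongside its dict.{src,tgt}.txt and
# bpecodes files, as produced by a fairseq model dump.
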
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
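
# Usage sketch (assumption: this test module sits in diffusers' tests/schedulers/
# directory, where test_schedulers.SchedulerCommonTest provides dummy_model,
# dummy_sample_deter and the check_over_* helpers):
#
#   python -m pytest tests/schedulers/test_scheduler_unclip.py -q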