"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = VideoToVideoSDPipeline
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {'image', 'width', 'height'}
__UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {'image'}
__UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCAmelCase : Any = False
# No `output_type`.
__UpperCAmelCase : str = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
__lowercase : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , )
__lowercase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
__lowercase : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
__lowercase : int = CLIPTextModel(_UpperCAmelCase )
__lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowercase : str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case ( self : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any]=0 ):
# 3 frames
__lowercase : int = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("mps" ):
__lowercase : List[str] = torch.manual_seed(_UpperCAmelCase )
else:
__lowercase : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__lowercase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case ( self : List[Any] ):
__lowercase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase : Optional[Any] = self.get_dummy_components()
__lowercase : Union[str, Any] = VideoToVideoSDPipeline(**_UpperCAmelCase )
__lowercase : int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase : Any = self.get_dummy_inputs(_UpperCAmelCase )
__lowercase : Optional[int] = 'np'
__lowercase : Optional[int] = sd_pipe(**_UpperCAmelCase ).frames
__lowercase : str = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
__lowercase : int = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Optional[int] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def snake_case ( self : Any ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def snake_case ( self : List[Any] ):
pass
def snake_case ( self : Any ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Tuple ):
__lowercase : Optional[Any] = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__lowercase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__lowercase : Tuple = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=_UpperCAmelCase )
__lowercase : Optional[int] = video.to("cuda" )
__lowercase : List[Any] = 'Spiderman is surfing'
__lowercase : Any = pipe(_UpperCAmelCase , video=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=3 , output_type="pt" ).frames
__lowercase : Dict = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between
    two points on its surface, using Lambert's ellipsoidal distance formula.
    """
    # Flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
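# Example usage (a minimal sketch; the coordinates are illustrative and the
# function name matches the de-obfuscated definition above):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     meters = lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)
#     print(f"{meters:,.0f} m")  # on the order of 254 km between the two points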
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """Construct an XLNet tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by appending `<sep>` and `<cls>`: `X <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs for a sequence pair, with segment id 2 for the final `<cls>`."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
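# A short usage sketch (assumes Hub access; "xlnet-base-cased" is the public
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#     from transformers import XLNetTokenizer
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     encoded = tokenizer("Hello, world!")
#     # input_ids end with <sep> and <cls>, per build_inputs_with_special_tokens
#     print(encoded["input_ids"])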
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop generation once the total sequence length exceeds `max_length`."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop generation once `start_length + max_new_tokens` tokens were produced."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop generation once more than `max_time` seconds have elapsed."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
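# A minimal usage sketch of the criteria defined above (shapes are illustrative;
# `scores` is ignored by these particular criteria):
#
#     import torch
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#     input_ids = torch.ones((1, 25), dtype=torch.long)
#     scores = torch.zeros((1, 100))
#     assert criteria(input_ids, scores)  # True: 25 >= 20, so generation should stop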
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of `successes` successes in `trials` independent
    Bernoulli trials, each succeeding with probability `prob` (binomial PMF).
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial through the given points using
    Neville's method. Returns the approximated value at x0 and the full table.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
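# Worked example for Neville's method above: the points (1, 6), (2, 7), (3, 8), (4, 9)
# lie on the line y = x + 5, so interpolating at x0 = 5 gives exactly 10:
#
#     value, table = neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)
#     print(value)  # 10.0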
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    """
    Constructs a Whisper processor which wraps a Whisper feature extractor and
    a Whisper tokenizer into a single processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
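# A short usage sketch (assumes Hub access; "openai/whisper-tiny" is a public
# checkpoint and `raw_speech` stands for a 1-D float array of 16 kHz audio):
#
#     from transformers import WhisperProcessor
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
#     # Passing `text=...` as well would attach tokenized transcripts under `labels`.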
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    """A simple FIFO queue used for level-order printing of the tree."""

    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    """A node of an AVL tree, storing its data, children and height."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` to the right and return the new root."""
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror image of `right_rotation`: rotate the subtree to the left."""
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    """Left-right double rotation."""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    """Right-left double rotation."""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    """An AVL tree: a self-balancing binary search tree."""

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
"""
Project Euler Problem 6: https://projecteuler.net/problem=6

Find the difference between the square of the sum and the sum of the squares
of the first `n` natural numbers.
"""


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
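# A brute-force cross-check of the closed-form solution above (a sanity test for small n):
#
#     def solution_brute_force(n: int = 100) -> int:
#         return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))
#
#     assert solution(10) == solution_brute_force(10) == 2640
#     assert solution(100) == solution_brute_force(100) == 25164150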
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """
    Circular FIFO queue with a fixed capacity, backed by a circular doubly
    linked list of pre-allocated nodes.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
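# Example usage of the fixed-capacity circular queue above:
#
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     print(queue.first())    # "a"
#     print(queue.dequeue())  # "a"
#     print(queue.dequeue())  # "b"
#     queue.dequeue()         # raises Exception("Empty Queue")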
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class a__ ( __magic_name__ ):
lowercase_ = "mra"
def __init__( self : Any , UpperCamelCase_ : List[Any]=50265 , UpperCamelCase_ : List[Any]=768 , UpperCamelCase_ : Dict=12 , UpperCamelCase_ : int=12 , UpperCamelCase_ : str=3072 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[str]=1e-5 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Any="full" , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : Dict=0 , UpperCamelCase_ : int=2 , **UpperCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : str = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : Optional[Any] = position_embedding_type
__UpperCAmelCase : Any = block_per_row
__UpperCAmelCase : Optional[int] = approx_mode
__UpperCAmelCase : int = initial_prior_first_n_blocks
__UpperCAmelCase : Tuple = initial_prior_diagonal_n_blocks
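# A minimal usage sketch (the standard PretrainedConfig workflow; the defaults
# come from the signature above):
#
#     config = MraConfig(num_hidden_layers=6)
#     print(config.hidden_size)  # 768
#     config.save_pretrained("./mra-config")  # writes config.json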
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Any = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
# set absolute/relative position embeddings parameter
_UpperCAmelCase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : int = True
# hparam_utils.py hparams
_UpperCAmelCase : Optional[int] = 0.664_694
_UpperCAmelCase : Any = 0.207_951
_UpperCAmelCase : Any = 0.121_194
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Dict = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : List[Any] = 0.0_352_513
_UpperCAmelCase : Union[str, Any] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Optional[int] = False
# hparam_utils.py hparams
_UpperCAmelCase : List[Any] = 36.4_519
_UpperCAmelCase : Union[str, Any] = 0.903_421
_UpperCAmelCase : List[Any] = 222.088
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : str = True
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : str = 0.763_141
_UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
elif task == "TABFACT":
_UpperCAmelCase : int = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
elif task == "MLM":
_UpperCAmelCase : List[str] = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase : Optional[Any] = TapasModel(config=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'Task {task} not supported.' )
print(f'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model (weights and configuration)
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Save tokenizer files
print(f'Save tokenizer files to {pytorch_dump_path}' )
_UpperCAmelCase : Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
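# Example invocation (paths and the script filename are illustrative; the flags
# are the ones registered with argparse above):
#
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output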
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : int , _snake_case : int ) ->list[list[int]]:
"""simple docstring"""
__snake_case : list[list[int]] = []
create_all_state(1 , _snake_case , _snake_case , [] , _snake_case )
return result
def lowercase ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) ->None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_snake_case , total_number - level + 2 ):
current_list.append(_snake_case )
create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case )
current_list.pop()
def lowercase ( _snake_case : list[list[int]] ) ->None:
"""simple docstring"""
for i in total_list:
print(*_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Optional[Any] = generate_all_combinations(n, k)
print_all_state(total_list)
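# For n = 4 and k = 2 the program above prints each of the C(4, 2) = 6 combinations:
#
#     1 2
#     1 3
#     1 4
#     2 3
#     2 4
#     3 4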
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def lowercase ( _snake_case : list[int] , _snake_case : list[int] , _snake_case : int ) ->list[int]:
"""simple docstring"""
__snake_case : List[Any] = [0] * no_of_processes
__snake_case : str = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_snake_case ):
__snake_case : Optional[Any] = burst_time[i]
__snake_case : list[int] = []
__snake_case : Optional[Any] = 0
__snake_case : Dict = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
__snake_case : Optional[Any] = []
__snake_case : List[str] = -1
for i in range(_snake_case ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_snake_case )
if len(_snake_case ) > 0:
__snake_case : Union[str, Any] = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__snake_case : int = i
total_time += burst_time[target_process]
completed += 1
__snake_case : Optional[Any] = 0
__snake_case : Optional[int] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def lowercase ( _snake_case : list[int] , _snake_case : int , _snake_case : list[int] ) ->list[int]:
"""simple docstring"""
__snake_case : Optional[int] = [0] * no_of_processes
for i in range(_snake_case ):
__snake_case : List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
SCREAMING_SNAKE_CASE : Dict = 4
SCREAMING_SNAKE_CASE : List[Any] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE : Optional[Any] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE : Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase =logging.get_logger(__name__)
def a ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> Dict:
"""simple docstring"""
a_ = 'backbone.' if is_semantic else ''
a_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Move dct[old] to dct[new] in place."""
    val = dct.pop(old)
    dct[new] = val
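
# Minimal illustration (not part of the original script) of what rename_key
# does, using a toy dict instead of a real checkpoint:
#   toy = {"blocks.0.norm1.weight": 1}
#   rename_key(toy, "blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
#   # toy is now {"beit.encoder.layer.0.layernorm_before.weight": 1}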
def prepare_img():
    """Download the standard COCO cats image used for sanity-checking outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the HF BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()

    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
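
# Illustrative invocation (script file name and output folder are placeholders;
# --checkpoint_url falls back to the dit-base URL declared above):
#   python convert_dit_to_pytorch.py --pytorch_dump_folder_path ./dit-base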
| 697
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 697
| 1
|
"""simple docstring"""
def UpperCAmelCase ( ):
"""simple docstring"""
return 1
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ = 200 ):
"""simple docstring"""
return two_pound(UpperCamelCase__ )
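

# Alternative sketch (not part of the original solution): the same count can be
# computed bottom-up with the classic coin-partition DP, avoiding the deep
# mutual recursion above; useful as an independent cross-check.
def solution_dp(pence: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [0] * (pence + 1)
    ways[0] = 1  # exactly one way to make 0p: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]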
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 714
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    """Read one token per line, stripping surrounding whitespace."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer (one token per vocabulary line)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
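
# Illustrative usage sketch (file name and protein sequence are placeholders,
# not from the original module):
#   tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#   ids = tokenizer("MKTAYIAKQR")["input_ids"]
#   # ids == [cls_id, *residue_ids, eos_id], per build_inputs_with_special_tokens above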
| 536
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
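
# Minimal sketch of the lazy-import idea used above (illustrative only; the
# real transformers._LazyModule is more elaborate). Attribute access triggers
# the underlying import, so heavy backends load on first use:
#
#   import importlib, types
#
#   class DemoLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#       def __getattr__(self, attr):
#           value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
#           setattr(self, attr, value)  # cache so later lookups bypass __getattr__
#           return value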
| 14
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
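
# Illustrative usage sketch (model name and image URL are placeholders):
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{'generated_text': '...'}]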
| 2
| 0
|
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of successive convergents of e, built from its continued
    # fraction [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
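

# Optional exact cross-check (not in the original solution), built with
# fractions.Fraction from the continued fraction e = [2; 1, 2, 1, 1, 4, ...]:
# sum_digits(convergent_numerator(100)) should equal solution(100).
def convergent_numerator(max_n: int = 100) -> int:
    from fractions import Fraction

    # partial quotients a_1 .. a_(max_n - 1); every a_k with k % 3 == 2 is even
    terms = [2 * ((k + 1) // 3) if k % 3 == 2 else 1 for k in range(1, max_n)]
    value = Fraction(terms[-1])
    for term in reversed(terms[:-1]):
        value = term + 1 / value
    return (2 + 1 / value).numerator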
if __name__ == "__main__":
print(f"""{solution() = }""")
| 706
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
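
# Illustrative usage sketch (values are arbitrary, not tied to any released model):
#   config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)
#   assert config.hidden_size == 1024 and config.use_cache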
| 619
| 0
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, as reals where possible."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
    main()
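
# Quick illustrative check (not in the original): a negative discriminant
# yields a conjugate pair, e.g. x^2 + 1 = 0:
#   quadratic_roots(a=1, b=0, c=1)  # -> (1j, -1j)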
| 217
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 217
| 1
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self, *args, **kwargs):
        # tokenizer has no padding token by default
        pass
| 715
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 159
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 588
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak a VisualBERT checkpoint into the transformers layout."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
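
# Illustrative invocation (file names are placeholders; the checkpoint must be
# one of ACCEPTABLE_CHECKPOINTS above):
#   python convert_visual_bert_checkpoint.py vqa_coco_pre_trained.th ./visualbert-vqa-coco-pre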
| 588
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """BARTpho tokenizer: a SentencePiece model plus a reduced monolingual vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 266
|
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """Convert a temporal UNet checkpoint of horizon `hor` into a diffusers UNet1DModel."""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the value-function checkpoint into a diffusers UNet1DModel."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 266
| 1
|
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
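
# Illustrative invocation (paths are placeholders):
#   python convert_ldm_original.py --checkpoint_path model.ckpt \
#       --config_path ldm_config.yaml --output_path ./ldm-pipeline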
| 592
|
"""simple docstring"""
_snake_case = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
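# A short sketch of how a pin table like `deps` can be consumed; `packaging` is a
# known third-party library whose Requirement/SpecifierSet types parse such strings.
from packaging.requirements import Requirement
from packaging.version import Version

req = Requirement(deps["tokenizers"])
assert Version("0.13.3") in req.specifier
assert Version("0.11.3") not in req.specifier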
| 510
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=6_4, sampling_rate=4_8_0_0_0, hop_length=4_8_0, max_length_s=1_0, fft_window_size=1_0_2_4, padding_value=0.0, return_attention_mask=False, frequency_min=0, frequency_max=1_4_0_0_0, top_db=None, truncation="fusion", padding="repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk", )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB", )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 6_4], mode="bilinear", align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(self, raw_speech, truncation=None, padding=None, max_length=None, sampling_rate=None, return_tensors=None, **kwargs):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
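# Illustrative-only sketch of the chunk-range selection inside _random_mel_fusion above:
# candidate start offsets are split into front/middle/back thirds, and one offset is
# drawn from each third.
import numpy as np

total_frames, chunk_frames = 1_0, 4
ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
assert [r.tolist() for r in ranges] == [[0, 1, 2], [3, 4], [5, 6]]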
| 571
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=3_2, num_hidden_layers=2, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_2
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=3_7)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 5_0_0_0_0

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """simple docstring"""

    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 1_0]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2, embedding_dim=5_1_2)
        emba([2, 1_6, 5_1_2])
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """simple docstring"""

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4, dtype=tf.float32), shape=(2, 1_2, 1_6, 6_4)) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4, dtype=tf.float32), shape=(2, 1_2, 1_6, 6_4)) / 1_0_0

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2, embedding_dim=6_4)
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer )

        expected_query_layer = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
        expected_key_layer = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
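# A NumPy sketch of the rotary trick these expected values exercise (a generic
# rotate-half formulation, assumed here; not the exact TF kernel above):
# q' = q * cos + rotate_half(q) * sin, applied over interleaved feature pairs.
import numpy as np

def rotate_half(x):
    x1, x2 = x[..., 0::2], x[..., 1::2]
    return np.reshape(np.stack([-x2, x1], axis=-1), x.shape)

q = np.arange(8.0).reshape(1, 8)
sin, cos = np.zeros_like(q), np.ones_like(q)
# with zero angles the rotation is the identity
assert np.allclose(q * cos + rotate_half(q) * sin, q)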
| 571
| 1
|
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.", )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels." )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts." )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids." )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)." )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.", )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on." )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model." )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-0_8, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
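# Compact, self-contained sketch of the subcommand registration pattern used by
# TrainCommand above (names here are invented for illustration):
from argparse import ArgumentParser

cli = ArgumentParser("demo-cli")
subcommands = cli.add_subparsers()
demo_train = subcommands.add_parser("train", help="Train a model.")
demo_train.add_argument("--train_data", type=str, required=True)
demo_train.set_defaults(func=lambda a: print(f"would train on {a.train_data}"))

parsed = cli.parse_args(["train", "--train_data", "data.csv"])
parsed.func(parsed)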
| 63
|
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
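# Quick illustrative self-check: exchange sort agrees with Python's built-in sort.
import random

sample = random.sample(range(1_0_0), 1_0)
assert exchange_sort(list(sample)) == sorted(sample)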
| 410
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """simple docstring"""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(*, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs, ):
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
A_ = F"--{field.name}"
A_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCamelCase__ ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
A_ = kwargs.pop('''aliases''' , [] )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ = [aliases]
A_ = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(lowerCamelCase__ , '''UnionType''' ) and isinstance(lowerCamelCase__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase__ ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F" Problem encountered in field '{field.name}'." )
if type(lowerCamelCase__ ) not in field.type.__args__:
# filter `str` in Union
A_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A_ = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A_ = (
field.type.__args__[0] if isinstance(lowerCamelCase__ , field.type.__args__[1] ) else field.type.__args__[1]
)
A_ = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A_ = {}
if origin_type is Literal or (isinstance(field.type , lowerCamelCase__ ) and issubclass(field.type , lowerCamelCase__ )):
if origin_type is Literal:
A_ = field.type.__args__
else:
A_ = [x.value for x in field.type]
A_ = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
A_ = field.default
else:
A_ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A_ = copy(lowerCamelCase__ )
# Hack because type=bool in argparse does not behave as we want.
A_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A_ = default
# This tells argparse we accept 0 or 1 value after --field_name
A_ = '''?'''
# This is the value that will get picked if we do --field_name (without value)
A_ = True
elif isclass(lowerCamelCase__ ) and issubclass(lowerCamelCase__ , lowerCamelCase__ ):
A_ = field.type.__args__[0]
A_ = '''+'''
if field.default_factory is not dataclasses.MISSING:
A_ = field.default_factory()
elif field.default is dataclasses.MISSING:
A_ = True
else:
A_ = field.type
if field.default is not dataclasses.MISSING:
A_ = field.default
elif field.default_factory is not dataclasses.MISSING:
A_ = field.default_factory()
else:
A_ = True
parser.add_argument(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A_ = False
parser.add_argument(F"--no_{field.name}" , action='''store_false''' , dest=field.name , **lowerCamelCase__ )
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
if hasattr(lowerCamelCase__ , '''_argument_group_name''' ):
A_ = self.add_argument_group(dtype._argument_group_name )
else:
A_ = self
try:
A_ = get_type_hints(lowerCamelCase__ )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(lowerCamelCase__ ):
A_ = '''.'''.join(map(lowerCamelCase__ , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(lowerCamelCase__ ):
if not field.init:
continue
A_ = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase__ , lowerCamelCase__ )
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ) -> Tuple[DataClass, ...]:
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A_ = []
if args_filename:
args_files.append(Path(lowerCamelCase__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A_ = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase__ , type=lowerCamelCase__ , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
A_ ,A_ = args_file_parser.parse_known_args(args=lowerCamelCase__ )
A_ = vars(lowerCamelCase__ ).get(args_file_flag.lstrip('''-''' ) , lowerCamelCase__ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase__ ) for p in cmd_args_file_paths] )
A_ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A_ = file_args + args if args is not None else file_args + sys.argv[1:]
A_ ,A_ = self.parse_known_args(args=lowerCamelCase__ )
A_ = []
for dtype in self.dataclass_types:
A_ = {f.name for f in dataclasses.fields(lowerCamelCase__ ) if f.init}
A_ = {k: v for k, v in vars(lowerCamelCase__ ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase__ , lowerCamelCase__ )
A_ = dtype(**lowerCamelCase__ )
outputs.append(lowerCamelCase__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
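# Minimal usage sketch (assuming the released transformers.HfArgumentParser, which
# this class mirrors; the dataclass fields are invented for illustration):
import dataclasses

from transformers import HfArgumentParser


@dataclasses.dataclass
class DemoArguments:
    learning_rate: float = 3e-5
    do_eval: bool = False


(demo_args,) = HfArgumentParser(DemoArguments).parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])
assert demo_args.learning_rate == 1e-4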
| 563
|
def lucas_lehmer_test(p: int) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
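# Cross-check against small known cases (illustrative): 2**p - 1 is prime for
# p = 3, 5, 13 but composite for p = 11 (2047 = 23 * 89).
for p, expected in [(3, True), (5, True), (11, False), (13, True)]:
    assert lucas_lehmer_test(p) is expected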
| 563
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=1_8, min_resolution=3_0, max_resolution=4_0_0, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        """simple docstring"""
        size = size if size is not None else {'height': 1_8, 'width': 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 1_8, 'width': 1_8})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2)
        self.assertEqual(image_processor.size, {'height': 4_2, 'width': 4_2})

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
| 273
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186
| 0
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 691
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 691
| 1
|
"""simple docstring"""
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("""number must not be negative""")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
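# Illustrative checks; note that, as written, 0 also passes the bit test since
# 0 & -1 == 0.
assert is_power_of_two(1) and is_power_of_two(6_4)
assert not is_power_of_two(6)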
| 238
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        """q""": query,
        """tbm""": """isch""",
        """hl""": """en""",
        """ijn""": """0""",
    }

    html = requests.get("""https://www.google.com/search""", params=params, headers=headers)
    soup = BeautifulSoup(html.text, """html.parser""")
    matched_images_data = """""".join(
        re.findall(R"""AF_initDataCallback\(([^<]+)\);""", str(soup.select("""script"""))) )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""", matched_images_data_json, )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""", """""", str(matched_google_image_data), )

    matched_google_full_resolution_images = re.findall(
        R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""", removed_matched_google_images_thumbnails, )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, """ascii""").decode(
            """unicode-escape""" )
        original_size_img = bytes(original_size_img_not_fixed, """ascii""").decode(
            """unicode-escape""" )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                """User-Agent""",
                """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
                """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f'''query_{query.replace(" ", "_")}'''
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f'''{path_name}/original_size_img_{index}.jpg''' )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
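# Minimal sketch of the double "unicode-escape" decode above: the embedded JSON
# escapes URL characters twice, so two passes are needed (the string below is a
# hypothetical stand-in for a scraped URL fragment).
doubly_escaped = "\\\\u0041BC"  # the literal nine characters \\u0041BC
once = bytes(doubly_escaped, "ascii").decode("unicode-escape")  # -> \u0041BC
twice = bytes(once, "ascii").decode("unicode-escape")  # -> ABC
assert twice == "ABC"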
| 238
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["""CLIPFeatureExtractor"""]
    _import_structure["image_processing_clip"] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
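# Minimal sketch of the lazy-import pattern used above; this toy class is
# hypothetical and far simpler than the real _LazyModule (no __dir__, no direct
# submodule attributes). The owning submodule is imported only on first access.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")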
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert abs(scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 ) < 1e-5
assert abs(scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 ) < 1e-5
assert abs(scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 ) < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
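# Illustrative standalone denoising loop with UnCLIPScheduler, mirroring the
# full-loop test above; the zero tensor is a stand-in for a real model's noise
# prediction, so the output is meaningless but the API calls are real.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in prediction
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample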
| 53
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCAmelCase : int = None
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : List[Any] = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase : Tuple = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
UpperCAmelCase : Optional[int] = '▁'
class lowerCamelCase__ ( __a ):
"""simple docstring"""
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ['''input_ids''', '''attention_mask''']
__a = BarthezTokenizer
def __init__( self : str , UpperCamelCase : int=None , UpperCamelCase : str=None , UpperCamelCase : str="<s>" , UpperCamelCase : Any="</s>" , UpperCamelCase : Dict="</s>" , UpperCamelCase : Any="<s>" , UpperCamelCase : Optional[int]="<unk>" , UpperCamelCase : str="<pad>" , UpperCamelCase : Optional[Any]="<mask>" , **UpperCamelCase : Dict , ):
'''simple docstring'''
__UpperCAmelCase : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase : List[Any] = vocab_file
__UpperCAmelCase : int = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
__UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
__UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Dict = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
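# Illustrative use of the fast tokenizer defined above (downloads the
# "moussaKam/barthez" checkpoint, so network access is assumed):
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("moussaKam/barthez")
ids = tok("Transformers est génial !")["input_ids"]
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id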
| 139
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCAmelCase_ ( lowercase: str , lowercase: complex , lowercase: str = "x" , lowercase: float = 10**-10 , lowercase: int = 1 , ) -> complex:
'''simple docstring'''
_UpperCamelCase: Any = symbols(lowercase )
_UpperCamelCase: str = lambdify(lowercase , lowercase )
_UpperCamelCase: str = lambdify(lowercase , diff(lowercase , lowercase ) )
_UpperCamelCase: Optional[int] = starting_point
while True:
if diff_function(lowercase ) != 0:
_UpperCamelCase: int = prev_guess - multiplicity * func(lowercase ) / diff_function(
lowercase )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCamelCase: Any = next_guess
# Let's execute some examples
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find the fourth root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson('exp(x) - 1', 1_0, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
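# Minimal sketch of the same Newton iteration without sympy, for
# f(x) = x**2 - 2 (approximates sqrt(2)); the helper name is illustrative only.
def _newton_sqrt_two(guess: float = 1.5, precision: float = 1e-10) -> float:
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n), with f(x) = x**2 - 2 and f'(x) = 2x
        next_guess = guess - (guess * guess - 2) / (2 * guess)
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess


# _newton_sqrt_two() -> 1.4142135623730951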
| 271
| 0
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __lowerCAmelCase ( lowercase : Optional[int] ) -> int:
"""simple docstring"""
snake_case : Tuple = np.inf
def set_batch_size(lowercase : Dict ) -> None:
nonlocal batch_size
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Optional[Any] = min(UpperCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : str = min(UpperCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and feature.dtype == "binary":
snake_case : int = min(UpperCamelCase__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(UpperCamelCase__ , UpperCamelCase__ )
return None if batch_size is np.inf else batch_size
class _lowerCAmelCase ( lowercase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , num_proc=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case : int = path_or_paths if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else {self.split: path_or_paths}
snake_case : Optional[Any] = _PACKAGED_DATASETS_MODULES["parquet"][1]
snake_case : List[str] = Parquet(
cache_dir=UpperCamelCase__ , data_files=UpperCamelCase__ , features=UpperCamelCase__ , hash=UpperCamelCase__ , **UpperCamelCase__ , )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
snake_case : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case : str = None
snake_case : Optional[Any] = None
snake_case : Optional[int] = None
snake_case : int = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , num_proc=self.num_proc , )
snake_case : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
snake_case : str = dataset
snake_case : List[Any] = path_or_buf
snake_case : Optional[int] = batch_size or get_writer_batch_size(dataset.features )
snake_case : List[str] = parquet_writer_kwargs
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
snake_case : Optional[int] = self._write(file_obj=UpperCamelCase__ , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
else:
snake_case : List[Any] = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase__ , **self.parquet_writer_kwargs )
return written
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = 0
snake_case : Optional[Any] = parquet_writer_kwargs.pop("path_or_buf" , UpperCamelCase__ )
snake_case : List[Any] = self.dataset.features.arrow_schema
snake_case : List[str] = pq.ParquetWriter(UpperCamelCase__ , schema=UpperCamelCase__ , **UpperCamelCase__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCamelCase__ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
snake_case : Optional[int] = query_table(
table=self.dataset._data , key=slice(UpperCamelCase__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCamelCase__ )
written += batch.nbytes
writer.close()
return written
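# Illustrative round trip through the reader/writer above via the public
# Dataset helpers (assumes the `datasets` package; the file name is a placeholder):
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("tiny.parquet")  # delegates to the Parquet writer machinery
roundtrip = Dataset.from_parquet("tiny.parquet")  # delegates to the reader
assert roundtrip["text"] == ["a", "b"]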
| 720
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
__snake_case = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__snake_case = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__snake_case = """|""".join(sys.argv[1:])
__snake_case = re.compile(RF'''^({joined_dirs}).*?\.py$''')
__snake_case = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
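# Quick self-contained illustration of the filtering regex, assuming the script
# was invoked with the top-level dirs "utils" and "src":
import re

_regex = re.compile(r"^(utils|src).*?\.py$")
assert _regex.match("src/transformers/models/clip/modeling_clip.py")
assert not _regex.match("docs/source/index.mdx")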
| 117
| 0
|
# fmt: off
UpperCamelCase = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCamelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def A ( lowercase__ : str ) -> str:
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A ( lowercase__ : str ) -> str:
return "".join(REVERSE_DICT[char] for char in message.split() )
def A ( ) -> None:
UpperCamelCase__ :Union[str, Any] = """Morse code here!"""
print(lowercase__ )
UpperCamelCase__ :Dict = encrypt(lowercase__ )
print(lowercase__ )
UpperCamelCase__ :Optional[Any] = decrypt(lowercase__ )
print(lowercase__ )
if __name__ == "__main__":
main()
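# Worked round trip for the table above, using the encrypt/decrypt names from
# the __main__ block (word gaps are encoded as "/"):
# encrypt("SOS HELP")  -> "... --- ... / .... . .-.. .--."
# decrypt("... --- ... / .... . .-.. .--.")  -> "SOS HELP"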
| 45
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , A__ : int=6_4 , A__ : List[Any]=1 , A__ : List[Any]=3 , A__ : Any=1_8_0 , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Dict=8 , A__ : Any=2.0 , A__ : Optional[int]=True , A__ : Union[str, Any]=0.0 , A__ : Union[str, Any]=0.0 , A__ : List[str]=0.1 , A__ : Any="gelu" , A__ : Tuple=False , A__ : Optional[int]=0.02 , A__ : List[Any]=1E-5 , A__ : Any=2 , A__ : Union[str, Any]=1.0 , A__ : Dict="1conv" , A__ : Optional[Any]="pixelshuffle" , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**A__ )
a__ : List[str] = image_size
a__ : Optional[Any] = patch_size
a__ : Dict = num_channels
a__ : Optional[int] = embed_dim
a__ : int = depths
a__ : Optional[int] = len(A__ )
a__ : Dict = num_heads
a__ : List[Any] = window_size
a__ : Optional[int] = mlp_ratio
a__ : Optional[int] = qkv_bias
a__ : Union[str, Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = drop_path_rate
a__ : int = hidden_act
a__ : int = use_absolute_embeddings
a__ : Dict = layer_norm_eps
a__ : List[str] = initializer_range
a__ : List[Any] = upscale
a__ : List[Any] = img_range
a__ : Optional[int] = resi_connection
a__ : int = upsampler
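# Illustrative construction of the config above (assumes transformers exports
# it as Swin2SRConfig):
from transformers import Swin2SRConfig

cfg = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
assert cfg.model_type == "swin2sr"
assert cfg.num_hidden_layers == len(cfg.depths)  # via the attribute_map above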
| 688
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __snake_case( __A ):
_A = '''deformable_detr'''
_A = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , A_=True , A_=None , A_=3 , A_=300 , A_=1_024 , A_=6 , A_=1_024 , A_=8 , A_=6 , A_=1_024 , A_=8 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=True , A_=False , A_="sine" , A_="resnet50" , A_=True , A_=False , A_=4 , A_=4 , A_=4 , A_=False , A_=300 , A_=False , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , A_=0.25 , A_=False , **A_ , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(A_ , A_ ):
_SCREAMING_SNAKE_CASE = backbone_config.get('''model_type''' )
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
_SCREAMING_SNAKE_CASE = config_class.from_dict(A_ )
_SCREAMING_SNAKE_CASE = use_timm_backbone
_SCREAMING_SNAKE_CASE = backbone_config
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = num_queries
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = encoder_ffn_dim
_SCREAMING_SNAKE_CASE = encoder_layers
_SCREAMING_SNAKE_CASE = encoder_attention_heads
_SCREAMING_SNAKE_CASE = decoder_ffn_dim
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = dropout
_SCREAMING_SNAKE_CASE = attention_dropout
_SCREAMING_SNAKE_CASE = activation_dropout
_SCREAMING_SNAKE_CASE = activation_function
_SCREAMING_SNAKE_CASE = init_std
_SCREAMING_SNAKE_CASE = init_xavier_std
_SCREAMING_SNAKE_CASE = encoder_layerdrop
_SCREAMING_SNAKE_CASE = auxiliary_loss
_SCREAMING_SNAKE_CASE = position_embedding_type
_SCREAMING_SNAKE_CASE = backbone
_SCREAMING_SNAKE_CASE = use_pretrained_backbone
_SCREAMING_SNAKE_CASE = dilation
# deformable attributes
_SCREAMING_SNAKE_CASE = num_feature_levels
_SCREAMING_SNAKE_CASE = encoder_n_points
_SCREAMING_SNAKE_CASE = decoder_n_points
_SCREAMING_SNAKE_CASE = two_stage
_SCREAMING_SNAKE_CASE = two_stage_num_proposals
_SCREAMING_SNAKE_CASE = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_SCREAMING_SNAKE_CASE = class_cost
_SCREAMING_SNAKE_CASE = bbox_cost
_SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
_SCREAMING_SNAKE_CASE = mask_loss_coefficient
_SCREAMING_SNAKE_CASE = dice_loss_coefficient
_SCREAMING_SNAKE_CASE = bbox_loss_coefficient
_SCREAMING_SNAKE_CASE = giou_loss_coefficient
_SCREAMING_SNAKE_CASE = eos_coefficient
_SCREAMING_SNAKE_CASE = focal_alpha
_SCREAMING_SNAKE_CASE = disable_custom_kernels
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def A ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def A ( self ):
'''simple docstring'''
return self.d_model
def A ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
_SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
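# Illustrative construction (assumes transformers exports DeformableDetrConfig;
# note that two_stage=True requires with_box_refine=True, as validated above):
from transformers import DeformableDetrConfig

cfg = DeformableDetrConfig(two_stage=True, with_box_refine=True)
assert cfg.num_attention_heads == cfg.encoder_attention_heads  # attribute_map
assert cfg.hidden_size == cfg.d_model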
| 168
|
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase : str = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=3_0_5_2_2, type=int)
lowerCamelCase : Dict = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
lowerCamelCase : Union[str, Any] = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
lowerCamelCase : Tuple = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCamelCase : Dict = [0] * args.vocab_size
for k, v in counter.items():
lowerCamelCase : str = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
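# The core counting logic above, shown on toy data (no pickle files needed;
# vocab_size = 10 here is purely illustrative):
from collections import Counter

toy_data = [[5, 7, 5], [7, 7]]
toy_counter = Counter()
for tk_ids in toy_data:
    toy_counter.update(tk_ids)
toy_counts = [0] * 10
for k, v in toy_counter.items():
    toy_counts[k] = v
assert toy_counts[5] == 2 and toy_counts[7] == 3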
| 168
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
class a ( a__ ):
snake_case__ = ['''input_features''', '''is_longer''']
def __init__( self , _snake_case=64 , _snake_case=4_80_00 , _snake_case=4_80 , _snake_case=10 , _snake_case=10_24 , _snake_case=0.0 , _snake_case=False , _snake_case = 0 , _snake_case = 1_40_00 , _snake_case = None , _snake_case = "fusion" , _snake_case = "repeatpad" , **_snake_case , ):
"""simple docstring"""
super().__init__(
feature_size=_snake_case , sampling_rate=_snake_case , padding_value=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
lowerCAmelCase = top_db
lowerCAmelCase = truncation
lowerCAmelCase = padding
lowerCAmelCase = fft_window_size
lowerCAmelCase = (fft_window_size >> 1) + 1
lowerCAmelCase = hop_length
lowerCAmelCase = max_length_s
lowerCAmelCase = max_length_s * sampling_rate
lowerCAmelCase = sampling_rate
lowerCAmelCase = frequency_min
lowerCAmelCase = frequency_max
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm=_snake_case , mel_scale='htk' , )
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_snake_case , min_frequency=_snake_case , max_frequency=_snake_case , sampling_rate=_snake_case , norm='slaney' , mel_scale='slaney' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = spectrogram(
_snake_case , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_snake_case , log_mel='dB' , )
return log_mel_spectrogram.T
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase = [0]
# randomly choose index for each part
lowerCAmelCase = np.random.choice(ranges[0] )
lowerCAmelCase = np.random.choice(ranges[1] )
lowerCAmelCase = np.random.choice(ranges[2] )
lowerCAmelCase = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase = torch.tensor(mel[None, None, :] )
lowerCAmelCase = torch.nn.functional.interpolate(
_snake_case , size=[chunk_frames, 64] , mode='bilinear' , align_corners=_snake_case )
lowerCAmelCase = mel_shrink[0][0].numpy()
lowerCAmelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase = len(_snake_case ) - max_length
lowerCAmelCase = np.random.randint(0 , overflow + 1 )
lowerCAmelCase = waveform[idx : idx + max_length]
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters )
lowerCAmelCase = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowerCAmelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase = False
else:
lowerCAmelCase = self._random_mel_fusion(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
lowerCAmelCase = False
# only use repeat as a new possible value for padding; the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase = int(max_length / len(_snake_case ) )
lowerCAmelCase = np.stack(np.tile(_snake_case , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase = int(max_length / len(_snake_case ) )
lowerCAmelCase = np.stack(np.tile(_snake_case , _snake_case ) )
lowerCAmelCase = np.pad(_snake_case , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters )
lowerCAmelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase = self._np_extract_fbank_features(_snake_case , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = truncation if truncation is not None else self.truncation
lowerCAmelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase = isinstance(_snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(_snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_snake_case , np.ndarray ):
lowerCAmelCase = np.asarray(_snake_case , dtype=np.floataa )
elif isinstance(_snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray(_snake_case )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase = [
self._get_input_mel(_snake_case , max_length if max_length else self.nb_max_samples , _snake_case , _snake_case )
for waveform in raw_speech
]
lowerCAmelCase = []
lowerCAmelCase = []
for mel, longer in padded_inputs:
input_mel.append(_snake_case )
is_longer.append(_snake_case )
if truncation == "fusion" and sum(_snake_case ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase = np.random.randint(0 , len(_snake_case ) )
lowerCAmelCase = True
if isinstance(input_mel[0] , _snake_case ):
lowerCAmelCase = [np.asarray(_snake_case , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase = [[longer] for longer in is_longer]
lowerCAmelCase = {'input_features': input_mel, 'is_longer': is_longer}
lowerCAmelCase = BatchFeature(_snake_case )
if return_tensors is not None:
lowerCAmelCase = input_features.convert_to_tensors(_snake_case )
return input_features
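# Illustrative call to the extractor above (assumes transformers exports it as
# ClapFeatureExtractor; the input is one second of silence at the default 48 kHz):
import numpy as np
from transformers import ClapFeatureExtractor

fe = ClapFeatureExtractor()
audio = np.zeros(48_000, dtype=np.float32)
feats = fe(audio, sampling_rate=48_000, return_tensors="np")
print(feats["input_features"].shape, feats["is_longer"])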
| 4
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 658
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" ,_lowerCAmelCase ,)
super().__init__(*_lowerCAmelCase ,**_lowerCAmelCase )
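# The shim above only warns and delegates, so new code can target the image
# processor directly (illustrative):
from transformers import LayoutLMv2ImageProcessor

processor = LayoutLMv2ImageProcessor()  # drop-in replacement for the deprecated class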
| 716
|
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = OmegaConf.load(__lowerCAmelCase )
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
lowerCamelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCamelCase__ = {}
lowerCamelCase__ = """first_stage_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCamelCase__ = {}
lowerCamelCase__ = """model.diffusion_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCamelCase__ = state_dict[key]
lowerCamelCase__ = config.model.params.first_stage_config.params
lowerCamelCase__ = config.model.params.unet_config.params
lowerCamelCase__ = VQModel(**__lowerCAmelCase ).eval()
vqvae.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = UNetLDMModel(**__lowerCAmelCase ).eval()
unet.load_state_dict(__lowerCAmelCase )
lowerCamelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="""scaled_linear""" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , )
lowerCamelCase__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipeline.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
UpperCamelCase : List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
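# Example invocation of the conversion script above (all paths are placeholders):
# python conversion_script.py \
#     --checkpoint_path model.ckpt \
#     --config_path config.yaml \
#     --output_path ./ldm_pipeline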
| 9
| 0
|
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_a : int = logging.get_logger(__name__)
@add_end_docstrings(__a )
class _UpperCAmelCase ( __a ):
def __init__( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
requires_backends(self,"""vision""" )
requires_backends(self,"""torch""" )
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(lowerCAmelCase__ )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {}
__lowerCAmelCase = {}
__lowerCAmelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
__lowerCAmelCase = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__lowerCAmelCase = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__lowerCAmelCase = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__lowerCAmelCase = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__lowerCAmelCase = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__lowerCAmelCase = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__lowerCAmelCase = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__lowerCAmelCase = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__lowerCAmelCase = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__lowerCAmelCase = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__lowerCAmelCase = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__lowerCAmelCase = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self,__SCREAMING_SNAKE_CASE,*__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return super().__call__(lowerCAmelCase__,*lowerCAmelCase__,num_workers=lowerCAmelCase__,batch_size=lowerCAmelCase__,**lowerCAmelCase__ )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE = 0,__SCREAMING_SNAKE_CASE = 5_12 / 15_00,__SCREAMING_SNAKE_CASE = 32,__SCREAMING_SNAKE_CASE = 1,):
'''simple docstring'''
__lowerCAmelCase = load_image(lowerCAmelCase__ )
__lowerCAmelCase = self.image_processor.size["""longest_edge"""]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.generate_crop_boxes(
lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = self.image_processor(images=lowerCAmelCase__,return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__lowerCAmelCase = self.get_inference_context()
with inference_context():
__lowerCAmelCase = self._ensure_tensor_on_device(lowerCAmelCase__,device=self.device )
__lowerCAmelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__lowerCAmelCase = image_embeddings
__lowerCAmelCase = grid_points.shape[1]
__lowerCAmelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0,lowerCAmelCase__,lowerCAmelCase__ ):
__lowerCAmelCase = grid_points[:, i : i + points_per_batch, :, :]
__lowerCAmelCase = input_labels[:, i : i + points_per_batch]
__lowerCAmelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0.88,__SCREAMING_SNAKE_CASE=0.95,__SCREAMING_SNAKE_CASE=0,__SCREAMING_SNAKE_CASE=1,):
'''simple docstring'''
__lowerCAmelCase = model_inputs.pop("""input_boxes""" )
__lowerCAmelCase = model_inputs.pop("""is_last""" )
__lowerCAmelCase = model_inputs.pop("""original_sizes""" ).tolist()
__lowerCAmelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__lowerCAmelCase = self.model(**lowerCAmelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__lowerCAmelCase = model_outputs["""pred_masks"""]
__lowerCAmelCase = self.image_processor.post_process_masks(
lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,binarize=lowerCAmelCase__ )
__lowerCAmelCase = model_outputs["""iou_scores"""]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.filter_masks(
masks[0],iou_scores[0],original_sizes[0],input_boxes[0],lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=0.7,):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__lowerCAmelCase = torch.cat(lowerCAmelCase__ )
__lowerCAmelCase = torch.cat(lowerCAmelCase__ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.post_process_for_mask_generation(
lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = defaultdict(lowerCAmelCase__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase__ )
__lowerCAmelCase = {}
if output_rle_mask:
__lowerCAmelCase = rle_mask
if output_bboxes_mask:
__lowerCAmelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 689
|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
create_all_state(1 , UpperCamelCase_ , UpperCamelCase_ , [] , UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase_ , total_number - level + 2 ):
current_list.append(UpperCamelCase_ )
create_all_state(i + 1 , UpperCamelCase_ , level - 1 , UpperCamelCase_ , UpperCamelCase_ )
current_list.pop()
def _lowerCAmelCase ( UpperCamelCase_ ):
for i in total_list:
print(*UpperCamelCase_ )
if __name__ == "__main__":
__magic_name__ = 4
__magic_name__ = 2
__magic_name__ = generate_all_combinations(n, k)
print_all_state(total_list)
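# For n = 4, k = 2 the backtracking above enumerates all C(4, 2) = 6 states,
# in lexicographic order:
# [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]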
| 155
| 0
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_labels
lowerCAmelCase = use_mc_token_ids
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
lowerCAmelCase = self.vocab_size - 1
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
if self.use_mc_token_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
lowerCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = CTRLModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = CTRLLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    input_mask,
    head_mask,
    token_type_ids,
    mc_token_ids,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.num_labels
lowerCAmelCase = CTRLForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _snake_case ( a_ , a_ , a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : List[Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : str = (CTRLLMHeadModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = CTRLModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , n_embd=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = CTRLModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@require_torch
class _snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) # Legal the president is
lowerCAmelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase = model.generate(_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].tolist() , _SCREAMING_SNAKE_CASE )
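# Illustrative greedy generation matching the slow integration test above
# (downloads the large "ctrl" checkpoint, so treat this purely as a sketch):
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")
inputs = tokenizer("Legal the president is", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=False)
print(tokenizer.decode(output_ids[0]))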
| 718
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : List[str] = RoCBertTokenizer
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = filter_non_english
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
lowerCAmelCase = {}
lowerCAmelCase = {}
for i, value in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = i
lowerCAmelCase = i
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
__lowerCAmelCase = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
__lowerCAmelCase = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
__lowerCAmelCase = re.compile(r'<FILL\s+[^>]*>')
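# Illustration (added): given a hypothetical marker line such as
#     # Copied from diffusers.models.attention.AttentionBlock with AttentionBlock->MyBlock
# `_re_copy_warning` captures three groups: the leading indent, the dotted object
# path ("models.attention.AttentionBlock"), and the optional replacement pattern
# ("with AttentionBlock->MyBlock"), which `_re_replace_pattern` then splits into
# (old, new, option) triples.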
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply black formatting to some code so that comparisons are robust to formatting."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
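# Note (added): `blackify` wraps indented fragments in a dummy `class Bla:` so that
# black sees a syntactically complete module even when the snippet is only a
# method body; the wrapper line is stripped back off after formatting.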
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line.
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
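# Minimal usage sketch (added; ViTMAEModel comes from transformers):
#
#     from transformers import ViTMAEModel
#     config = ViTMAEConfig()        # defaults correspond to facebook/vit-mae-base
#     model = ViTMAEModel(config)    # randomly initialized weights
#
# `mask_ratio=0.75` means three quarters of the image patches are masked during
# MAE pre-training, which is what makes the encoder pass comparatively cheap.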
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the value at index i in log(N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the combined value for the range [i, j] in log(N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the tree nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
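# Note (added): `update` and `query_range` each walk a single root-to-leaf path,
# so both run in O(log n) for n leaves, while `traverse` yields nodes in
# breadth-first order. For example, SegmentTree([2, 1, 5, 3, 4], min)
# .query_range(1, 3) evaluates min(1, 5, 3) == 1.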
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
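if __name__ == "__main__":
    # Small self-check (added for illustration): moving only right or down,
    # the cheapest path through this grid is 1 -> 3 -> 1 -> 1 -> 1, total 7.
    example = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(example) == 7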
if __name__ == "__main__":
import doctest
doctest.testmod()
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return the prime numbers up to and including `num`."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
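# Lightweight self-check (added for illustration).
if __name__ == "__main__":
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]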
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
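# Illustration (added): the function returns the 1-indexed position of the most
# significant set bit, e.g. 0b1000 -> 4, and 0 -> 0 since no bit is set.
if __name__ == "__main__":
    assert get_highest_set_bit_position(8) == 4
    assert get_highest_set_bit_position(0) == 0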
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{token} \n")

        return out_vocab_file, out_monolingual_vocab_file
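# Note (added): BartPho keeps the full multilingual XLM-R SentencePiece model for
# segmentation but remaps its pieces onto the reduced Vietnamese vocabulary from
# `dict.txt`; any piece outside that reduced set is mapped to <unk> at id time.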
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
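# Caveat (added): the cube-root check above goes through floating point, so it
# can misclassify large cubes where `n ** (1 / 3)` is inexact. An integer-only
# variant (a sketch, not part of the original file; assumes n >= 0) avoids that
# with a binary search for an exact integer cube root:
def perfect_cube_binary_search(n: int) -> bool:
    low, high = 0, max(n, 1)
    while low <= high:
        mid = (low + high) // 2
        cube = mid**3
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False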
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
UpperCAmelCase = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = """ Hello world! cécé herlolip"""
UpperCAmelCase = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
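# Note (added): `make_linear_from_emb` builds the LM head by reusing the shared
# embedding matrix as the output projection (weight tying), which is why the
# linear layer is created without a bias and `emb.weight.data` is copied in.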
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak model's weights to our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCAmelCase = """pytorch_model.bin"""
UpperCAmelCase = """pytorch_model.bin.index.json"""
UpperCAmelCase = """adapter_config.json"""
UpperCAmelCase = """adapter_model.bin"""
UpperCAmelCase = """adapter_model.safetensors"""
UpperCAmelCase = """tf_model.h5"""
UpperCAmelCase = """tf_model.h5.index.json"""
UpperCAmelCase = """model.ckpt"""
UpperCAmelCase = """flax_model.msgpack"""
UpperCAmelCase = """flax_model.msgpack.index.json"""
UpperCAmelCase = """model.safetensors"""
UpperCAmelCase = """model.safetensors.index.json"""
UpperCAmelCase = """config.json"""
UpperCAmelCase = """preprocessor_config.json"""
UpperCAmelCase = FEATURE_EXTRACTOR_NAME
UpperCAmelCase = """generation_config.json"""
UpperCAmelCase = """modelcard.json"""
UpperCAmelCase = """▁"""
UpperCAmelCase = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCAmelCase = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCAmelCase = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCAmelCase = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
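# Usage sketch (added): example scripts call this right after their imports, e.g.
#     check_min_version("4.21.0.dev0")
# so they fail fast with a readable error when the installed transformers is
# older than the examples tree expects. (The version string is illustrative.)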
"""simple docstring"""
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
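# Note (added): this is Kahn-style topological processing, so `graph` must be a
# DAG; `long_dist[v]` counts the vertices on the longest path ending at v, and
# the example above prints 5 (e.g. the path 0 -> 2 -> 5 -> 6 -> 7).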
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximal_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximal_absolute_difference))
    maximal_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximal_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 616
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "clusters" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
    def test_image_processor_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_image_processor_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip("ImageGPT requires clusters at initialization" )
    def test_init_without_params( self ):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image1 = Image.open(dataset[4]["file"] )
    image2 = Image.open(dataset[5]["file"] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
        expected_ids = [3_0_6, 1_9_1, 1_9_1]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
        expected_ids = [3_0_3, 1_3, 1_3]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 159
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ) -> torch.Tensor:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
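# Illustration (hypothetical sizes): a 513x769 PIL image is resized to 512x768
# (the nearest multiples of 32), scaled to [0, 1], reordered to NCHW, and finally
# mapped to the [-1, 1] range expected by the model.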
class LDMSuperResolutionPipeline( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , vqvae: VQModel , unet: UNetaDModel , scheduler: Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] = None , batch_size: Optional[int] = 1 , num_inference_steps: Optional[int] = 1_0_0 , eta: Optional[float] = 0.0 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}" )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 159
| 1
|
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE__ ( snake_case__ :float , snake_case__ :float , snake_case__ :float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance == 0:
return {"resistance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(snake_case__ , 2 ) + pow(snake_case__ , 2 ) )}
else:
raise ValueError('Exactly one argument must be 0' )
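# A worked example (illustrative values): for a series circuit with
# resistance=3 ohm and reactance=4 ohm, passing impedance=0 returns
# {"impedance": 5.0}, since sqrt(3**2 + 4**2) = 5.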
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool( PipelineTool ):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )
    def encode( self , document: "Image" , question: str ):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 562
| 0
|
class Node:
    """A binary search tree node."""
    def __init__( self , val ):
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    """Recursive in-order traversal that appends the visited values to `res`."""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    """Sort a list by inserting it into a BST and reading the tree back in order."""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
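    # Expected output: [1, 2, 3, 9, 10, 13, 14]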
| 717
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int(value ):
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float(value ):
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size'] ), int(row['sequence_length'] ))] = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row['batch_size'] ), int(row['sequence_length'] ))] = float(row['result'] )
    def plot( self ):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array , np.int_ )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
                plt.plot(x_axis_array , y_axis_array , '--' )
                title_str += F' {label_model_name} vs.'
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 531
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
    '''
    Cleans the table of content by removing duplicates and sorting entries alphabetically.
    '''
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'{duplicate_key} is present several times in the documentation table of content at '
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('The documentation has two \'overview\' docs, which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
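# In short: duplicated "local" entries are collapsed into a single entry, the
# remaining entries are sorted alphabetically by title, and the (at most one)
# "Overview" page is always kept first.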
def check_scheduler_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 634
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 634
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524_288,
}
class ReformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
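    # Illustrative example (actual pieces depend on the loaded SentencePiece model):
    # ["▁Hello", "▁world", "</s>"] -> "Hello world</s>"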
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 395
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def get_yolos_config( yolos_name ):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [8_00, 13_33]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_30
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 13_20
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_00, 13_44]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v( state_dict , config , base_model = False ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F'''encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
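# Illustration (hypothetical): for hidden_size H, timm stores self-attention as a
# single fused qkv matrix of shape (3H, H); the slices [:H], [H:2H] and [-H:]
# above are the query, key and value projections, respectively.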
def rename_key( name ):
    if "backbone" in name:
        name = name.replace('backbone' , 'vit' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "det_token" in name:
        name = name.replace('det_token' , 'embeddings.detection_tokens' )
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "class_embed" in name:
        name = name.replace('class_embed' , 'class_labels_classifier' )
    if "bbox_embed" in name:
        name = name.replace('bbox_embed' , 'bbox_predictor' )
    if "vit.norm" in name:
        name = name.replace('vit.norm' , 'vit.layernorm' )
    return name
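# Illustrative example: rename_key("blocks.0.attn.proj.weight")
# -> "encoder.layer.0.attention.output.dense.weight"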
def convert_state_dict( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[F'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_00 if yolos_name != 'yolos_ti' else 5_12
    image_processor = YolosImageProcessor(format='coco_detection' , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...' )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization='hustvl' )
        model.push_to_hub(model_name , organization='hustvl' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 395
| 1
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage( image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( image: Image ) -> Dict:
    npimg = np.array(image )
    shape = npimg.shape
    return {"hash": hashimage(image ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf( self ):
        pass
@slow
@require_torch
    def test_small_model_pt( self ):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge" )
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=2_5_6 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9967},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9909},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9879},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9834},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9716},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9612},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9552},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9532},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9516},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9499},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9483},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9464},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9408},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9335},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9326},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9262},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8999},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8986},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8984},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8873},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8871}
], )
# fmt: on
@require_torch
@slow
    def test_threshold( self ):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id )
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=2_5_6 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0210},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053},
], )
| 663
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1_1_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample, t )
            model_output = model(sample, t )
            output = scheduler.step(model_output, t, sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample, t )
            model_output = model(sample, t )
            output = scheduler.step(model_output, t, sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def A_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
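# A minimal sketch of the sampling loop exercised by the full-loop tests above
# (the names `model`, `initial_noise`, and `num_inference_steps` are
# illustrative assumptions, not attributes of the test class):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample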
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
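# Note: "*" in the Hugging Face target names above is a per-layer wildcard; at
# load time it is replaced with the transformer layer index parsed from the
# fairseq parameter name, so e.g. fairseq "encoder.layers.3.self_attn.k_proj"
# ends up at "wav2vec2.encoder.layers.3.attention.k_proj".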
__snake_case = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] ):
lowercase__ : Any = {}
with open(lowerCamelCase__ , """r""" ) as file:
for line_number, line in enumerate(lowerCamelCase__ ):
lowercase__ : List[str] = line.strip()
if line:
lowercase__ : List[str] = line.split()
lowercase__ : Optional[int] = line_number
lowercase__ : Union[str, Any] = words[0]
lowercase__ : Optional[int] = value
return result
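# Given the obfuscated assignments above, the function appears to map each
# line number to the first whitespace-separated token on that line. A
# hypothetical label file containing
#
#   happy
#   sad
#
# would therefore yield {0: "happy", 1: "sad"}, used below as the
# sequence-classification id2label mapping.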
def _lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ):
for attribute in key.split(""".""" ):
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase__ ):
lowercase__ : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
lowercase__ : int = """param"""
if weight_type is not None and weight_type != "param":
lowercase__ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
lowercase__ : Optional[int] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
lowercase__ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : List[Any] = shape_pointer.shape
# let's reduce dimension
lowercase__ : Union[str, Any] = value[0]
else:
lowercase__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ : List[str] = value
elif weight_type == "weight_g":
lowercase__ : Any = value
elif weight_type == "weight_v":
lowercase__ : Optional[Any] = value
elif weight_type == "bias":
lowercase__ : Any = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = value
else:
lowercase__ : Optional[int] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ):
lowercase__ : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase__ ):
lowercase__ : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
lowercase__ : Tuple = """param"""
if weight_type is not None and weight_type != "param":
lowercase__ : Dict = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowercase__ : Tuple = """.""".join([key, hf_param_name] )
else:
lowercase__ : Dict = key
lowercase__ : int = value if """lm_head""" in full_key else value[0]
__snake_case = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=None ):
lowercase__ : str = False
for key, mapped_key in MAPPING.items():
lowercase__ : Tuple = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase__ : List[Any] = True
if "*" in mapped_key:
lowercase__ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
lowercase__ : Tuple = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
lowercase__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
lowercase__ : List[Any] = """weight_v"""
elif "bias" in name:
lowercase__ : List[str] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : List[str] = """weight"""
else:
lowercase__ : Optional[int] = None
if hf_dict is not None:
rename_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return is_used
return is_used
def _lowerCamelCase ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple ):
lowercase__ : Union[str, Any] = []
lowercase__ : Dict = fairseq_model.state_dict()
lowercase__ : List[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase__ : Dict = True
else:
lowercase__ : int = load_wavaveca_layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ):
lowercase__ : Any = full_name.split("""conv_layers.""" )[-1]
lowercase__ : List[str] = name.split(""".""" )
lowercase__ : Optional[Any] = int(items[0] )
lowercase__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ : str = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ : Tuple = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
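# type_id convention inherited from the fairseq conv feature extractor:
# type_id 0 addresses the convolution itself and type_id 2 its normalization;
# with group norm enabled only the very first conv layer carries a norm, which
# is why the second branch also accepts (layer_id == 0 and use_group_norm).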
@torch.no_grad()
def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[int]=False ):
if config_path is not None:
lowercase__ : Optional[Any] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
else:
lowercase__ : Any = WavaVecaConfig()
if is_seq_class:
lowercase__ : int = read_txt_into_dict(lowerCamelCase__ )
lowercase__ : List[str] = idalabel
lowercase__ : Union[str, Any] = WavaVecaForSequenceClassification(lowerCamelCase__ )
lowercase__ : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
feature_extractor.save_pretrained(lowerCamelCase__ )
elif is_finetuned:
if dict_path:
lowercase__ : List[Any] = Dictionary.load(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ : Optional[Any] = target_dict.pad_index
lowercase__ : Any = target_dict.bos_index
lowercase__ : Any = target_dict.eos_index
lowercase__ : str = len(target_dict.symbols )
lowercase__ : Tuple = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowercase__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : List[str] = 0
lowercase__ : List[str] = 1
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[int] = WavaVecaCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
lowercase__ : Optional[int] = True if config.feat_extract_norm == """layer""" else False
lowercase__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowercase__ : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowercase__ : List[str] = WavaVecaForCTC(lowerCamelCase__ )
else:
lowercase__ : Optional[Any] = WavaVecaForPreTraining(lowerCamelCase__ )
if is_finetuned or is_seq_class:
lowercase__ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowercase__ : int = argparse.Namespace(task="""audio_pretraining""" )
lowercase__ : Tuple = fairseq.tasks.setup_task(lowerCamelCase__ )
lowercase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ )
lowercase__ : Any = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__snake_case = parser.parse_args()
__snake_case = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
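# Hypothetical invocation (the script name and paths are placeholders):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned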
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__snake_case = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ):
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase__ : List[Any] = """lm_head"""
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowercase__ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ : Dict = value
elif weight_type == "weight_g":
lowercase__ : Union[str, Any] = value
elif weight_type == "weight_v":
lowercase__ : str = value
elif weight_type == "bias":
lowercase__ : int = value
else:
lowercase__ : Optional[Any] = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[str] ):
lowercase__ : Tuple = []
lowercase__ : Dict = fairseq_model.state_dict()
lowercase__ : Optional[int] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase__ : int = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : List[str] = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase__ : Dict = True
if "*" in mapped_key:
lowercase__ : List[str] = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
lowercase__ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
lowercase__ : Any = """weight_g"""
elif "weight_v" in name:
lowercase__ : Any = """weight_v"""
elif "bias" in name:
lowercase__ : List[str] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : str = """weight"""
else:
lowercase__ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ):
lowercase__ : Dict = full_name.split("""conv_layers.""" )[-1]
lowercase__ : Union[str, Any] = name.split(""".""" )
lowercase__ : List[Any] = int(items[0] )
lowercase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ : Optional[int] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ : Tuple = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ : Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def _lowerCamelCase ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : List[Any]=True ):
if config_path is not None:
lowercase__ : int = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
else:
lowercase__ : Tuple = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase__ : Any = Dictionary.load_from_json(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ : int = target_dict.pad_index
lowercase__ : Tuple = target_dict.bos_index
lowercase__ : Dict = target_dict.eos_index
lowercase__ : Dict = len(target_dict.symbols )
lowercase__ : List[Any] = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowercase__ : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : Any = 42
lowercase__ : List[str] = 43
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
lowercase__ : List[str] = True if config.feat_extract_norm == """layer""" else False
lowercase__ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowercase__ : Any = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowercase__ : Any = UniSpeechForCTC(lowerCamelCase__ )
else:
lowercase__ : Dict = UniSpeechForPreTraining(lowerCamelCase__ )
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
lowercase__ , lowercase__ , lowercase__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase__ : str = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_unispeech.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__snake_case = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
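# Hypothetical invocation (script name and paths are placeholders):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech_large.pt \
#       --pytorch_dump_folder_path ./unispeech-large \
#       --dict_path ./dict.ltr.txt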
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case, __snake_case, bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
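# The helper above ties the output projection to the embedding matrix: the
# returned nn.Linear reuses emb.weight.data, so logits come out as
# hidden_states @ embedding_matrix.T with no second weight allocation.
# Sanity sketch (illustrative, assuming the helper is named make_linear_from_emb):
#   emb = nn.Embedding(10, 4)
#   lin = make_linear_from_emb(emb)
#   assert lin.weight.data_ptr() == emb.weight.data_ptr()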
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' )
_UpperCamelCase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
_UpperCamelCase = mam_aaa['''model''']
remove_ignore_keys_(__snake_case )
_UpperCamelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_UpperCamelCase = MaMaaaConfig(
vocab_size=__snake_case, max_position_embeddings=10_24, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
_UpperCamelCase = state_dict['''decoder.embed_tokens.weight''']
_UpperCamelCase = MaMaaaForConditionalGeneration(__snake_case )
model.model.load_state_dict(__snake_case, strict=__snake_case )
_UpperCamelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_a = parser.parse_args()
    _a = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
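# Hypothetical invocation (positional args: fairseq checkpoint, output dir):
#   python convert_m2m100_checkpoint.py ./m2m100_418M/model.pt ./m2m100-hf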
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( a__ , a__=1.0 , a__=None , a__=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
_UpperCamelCase = global_rng
_UpperCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
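# floats_list builds a nested Python list with the requested 2-D shape, each
# value drawn uniformly from [0, scale); e.g. floats_list((2, 3)) returns
# something like [[0.42, 0.11, 0.93], [0.70, 0.25, 0.51]] (values illustrative),
# seeded through the module-level global_rng for reproducibility.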
@require_torch
@require_torchaudio
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str]=7 , lowercase_ : List[str]=400 , lowercase_ : str=2000 , lowercase_ : Optional[int]=24 , lowercase_ : Optional[int]=24 , lowercase_ : Tuple=0.0 , lowercase_ : Any=16000 , lowercase_ : Any=True , lowercase_ : Optional[int]=True , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = min_seq_length
_UpperCamelCase = max_seq_length
_UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCamelCase = feature_size
_UpperCamelCase = num_mel_bins
_UpperCamelCase = padding_value
_UpperCamelCase = sampling_rate
_UpperCamelCase = return_attention_mask
_UpperCamelCase = do_normalize
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[Any]=False , lowercase_ : int=False) -> Dict:
"""simple docstring"""
def _flatten(lowercase_ : str):
return list(itertools.chain(*lowercase_))
if equal_length:
_UpperCamelCase = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
_UpperCamelCase = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
_UpperCamelCase = [np.asarray(lowercase_) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = SpeechaTextFeatureExtractor if is_speech_available() else None
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = SpeechaTextFeatureExtractionTester(self)
def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> int:
"""simple docstring"""
self.assertTrue(np.all(np.mean(lowercase_ , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase_ , axis=0) - 1) < 1e-3))
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = [np.asarray(lowercase_) for speech_input in speech_inputs]
# Test feature size
_UpperCamelCase = feature_extractor(lowercase_ , padding=lowercase_ , return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
# Test not batched input
_UpperCamelCase = feature_extractor(speech_inputs[0] , return_tensors="np").input_features
_UpperCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np").input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
# Test batched
_UpperCamelCase = feature_extractor(lowercase_ , return_tensors="np").input_features
_UpperCamelCase = feature_extractor(lowercase_ , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
# Test 2-D numpy arrays are batched.
_UpperCamelCase = [floats_list((1, x))[0] for x in (800, 800, 800)]
_UpperCamelCase = np.asarray(lowercase_)
_UpperCamelCase = feature_extractor(lowercase_ , return_tensors="np").input_features
_UpperCamelCase = feature_extractor(lowercase_ , return_tensors="np").input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3))
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = ["longest", "max_length", "do_not_pad"]
_UpperCamelCase = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_):
_UpperCamelCase = feature_extractor(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_)
_UpperCamelCase = inputs.input_features
_UpperCamelCase = inputs.attention_mask
_UpperCamelCase = [np.sum(lowercase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = ["longest", "max_length", "do_not_pad"]
_UpperCamelCase = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_):
_UpperCamelCase = feature_extractor(
lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_)
_UpperCamelCase = inputs.input_features
_UpperCamelCase = inputs.attention_mask
_UpperCamelCase = [np.sum(lowercase_) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
        self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = feature_extractor(
lowercase_ , padding="max_length" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
_UpperCamelCase = inputs.input_features
_UpperCamelCase = inputs.attention_mask
_UpperCamelCase = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1])
self._check_zero_mean_unit_variance(input_features[2])
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = feature_extractor(
lowercase_ , padding="longest" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
_UpperCamelCase = inputs.input_features
_UpperCamelCase = inputs.attention_mask
_UpperCamelCase = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24))
_UpperCamelCase = [floats_list((1, x))[0] for x in range(800 , 1400 , 200)]
_UpperCamelCase = feature_extractor(
lowercase_ , padding="longest" , max_length=16 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
_UpperCamelCase = inputs.input_features
_UpperCamelCase = inputs.attention_mask
_UpperCamelCase = np.sum(attention_mask == 1 , axis=1)
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
self._check_zero_mean_unit_variance(input_features[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24))
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
import torch
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = np.random.rand(100 , 32).astype(np.floataa)
_UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.floataa)
_UpperCamelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def __UpperCAmelCase ( self : str , lowercase_ : List[str]) -> Dict:
"""simple docstring"""
from datasets import load_dataset
_UpperCamelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
        _UpperCamelCase = ds.sort("id").select(range(lowercase_))[:lowercase_]["audio"]
return [x["array"] for x in speech_samples]
def __UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
        # fmt: off
        _UpperCamelCase = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
])
# fmt: on
_UpperCamelCase = self._load_datasamples(1)
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_UpperCamelCase = feature_extractor(lowercase_ , return_tensors="pt").input_features
        self.assertEqual(input_features.shape , (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1e-4))
def dodecahedron_surface_area( edge ) -> float:
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be a positive number." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge ) -> float:
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError("Length must be a positive number." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
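# Sanity values for a unit edge (edge = 1), handy for eyeballing results:
#   dodecahedron_surface_area(1) == 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457
#   dodecahedron_volume(1)       == (15 + 7 * sqrt(5)) / 4      ≈ 7.6631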
if __name__ == "__main__":
import doctest
doctest.testmod()
from functools import reduce
lowerCAmelCase__ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __lowercase ( _UpperCAmelCase = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_UpperCAmelCase ) - 12 ) )
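# The expression above slides a 13-digit window across the digit string and
# multiplies the digits via reduce; an equivalent, more explicit sketch:
#
#   def largest_window_product(digits: str = N, width: int = 13) -> int:
#       best = 0
#       for i in range(len(digits) - width + 1):
#           product = 1
#           for ch in digits[i : i + width]:
#               product *= int(ch)
#           best = max(best, product)
#       return best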
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__lowerCamelCase = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__lowerCamelCase = [ord(letter) for letter in string.ascii_lowercase]
__lowerCamelCase = {ord(char) for char in VALID_CHARS}
__lowerCamelCase = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a ( __UpperCAmelCase : list[int] , __UpperCAmelCase : tuple[int, ...] ) -> str | None:
__magic_name__: str = ""
__magic_name__: int
__magic_name__: int
__magic_name__: int
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
__magic_name__: str = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
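# XOR is self-inverse (c ^ k ^ k == c), so re-applying the cycled three-letter
# key to the ciphertext recovers the plaintext; try_key rejects a candidate as
# soon as any decoded byte falls outside the printable VALID_INTS set.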
def a ( __UpperCAmelCase : list[int] ) -> list[str]:
__magic_name__: list[str] = []
for key in product(__UpperCAmelCase , repeat=3 ):
__magic_name__: List[str] = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def a ( __UpperCAmelCase : list[str] , __UpperCAmelCase : str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def a ( __UpperCAmelCase : str = "p059_cipher.txt" ) -> int:
__magic_name__: list[int]
__magic_name__: list[str]
__magic_name__: str
__magic_name__: str
__magic_name__: str = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding="""utf-8""" )
__magic_name__: Dict = [int(__UpperCAmelCase ) for number in data.strip().split(""",""" )]
__magic_name__: Optional[int] = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
__magic_name__: Optional[Any] = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
__magic_name__: Tuple = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
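    # _get_dummy_logits (the method above) is deterministic thanks to the fixed
    # numpy seed (default 77); its default (2, 10, 16) shape matches the
    # 16-symbol vocabulary built at the top of this test class, so pyctcdecode
    # can consume the array directly as (batch, seq_len, vocab) logits.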
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
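    # A self-contained sketch (illustrative numbers, not values used by the
    # tests above) of how CTC word offsets become timestamps: seconds per
    # logit frame is the model's input-to-logits ratio over the sampling rate.
    def _offsets_to_seconds_demo(self):
        inputs_to_logits_ratio = 320  # assumed ratio for a wav2vec2-base-like model
        sampling_rate = 16_000
        time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per frame
        word_offsets = [{"word": "hello", "start_offset": 12, "end_offset": 21}]
        return [
            {
                "word": d["word"],
                "start_time": d["start_offset"] * time_offset,  # 0.24 s
                "end_time": d["end_offset"] * time_offset,      # 0.42 s
            }
            for d in word_offsets
        ]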
"""simple docstring"""
def _lowerCamelCase ( matrix : list[list[int | float]] ) -> int:
    """simple docstring"""
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                # Copy the last usable column into the current zero column
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
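# Usage sketch (added for illustration; `_lowerCamelCase` is the rank routine
# defined above). The second row is twice the first, so the expected rank is 2.
def _rank_demo() -> None:
    example = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [0.0, 1.0, 1.0]]
    assert _lowerCamelCase(example) == 2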
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wav2vec2"""] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_wav2vec2"""] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_wav2vec2"""] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        # the Flax classes live in the Flax module, not the TF one
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
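# Illustration (import paths assume this package is installed as
# `transformers`): with the `_LazyModule` pattern above, importing the
# package stays cheap, and a heavy submodule such as `modeling_wav2vec2` is
# only imported the first time one of its attributes is accessed.
# from transformers.models.wav2vec2 import Wav2Vec2Config  # light: config only
# from transformers.models.wav2vec2 import Wav2Vec2Model   # triggers the torch-backed import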
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__snake_case : Optional[Any] = logging.get_logger(__name__)
def make_batched ( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
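# Quick behavioural check for `make_batched` (added for illustration): a bare
# frame becomes a one-video batch, a list of frames becomes one video, and an
# already-batched input is returned unchanged.
def _make_batched_demo() -> None:
    frame = np.zeros((16, 16, 3), dtype=np.uint8)  # one valid image
    video = [frame, frame]                         # a list of frames
    assert len(make_batched(frame)) == 1           # -> [[frame]]
    assert len(make_batched(video)[0]) == 2        # -> [video]
    assert len(make_batched([video])[0]) == 2      # already batched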
class UpperCamelCase ( BaseImageProcessor ):
"""simple docstring"""
_lowerCamelCase : Optional[int] =["pixel_values"]
def __init__( self : Union[str, Any] , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : bool = True , _lowerCamelCase : Union[int, float] = 1 / 2_5_5 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , **_lowerCamelCase : List[Any] , ):
super().__init__(**_lowerCamelCase )
A__ = size if size is not None else {'''shortest_edge''': 2_5_6}
A__ = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
A__ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
A__ = get_size_dict(_lowerCamelCase , param_name='''crop_size''' )
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = offset
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : Any , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[Any] , ):
A__ = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(_lowerCamelCase , size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
elif "height" in size and "width" in size:
A__ = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def A__ ( self : List[str] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[Any] , ):
A__ = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def A__ ( self : Dict , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[int, float] , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Any , ):
A__ = image.astype(np.floataa )
if offset:
A__ = image - (scale / 2)
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def A__ ( self : Any , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Union[str, Any] , ):
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def A__ ( self : Tuple , _lowerCamelCase : ImageInput , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : bool = None , _lowerCamelCase : float = None , _lowerCamelCase : bool = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
A__ = to_numpy_array(_lowerCamelCase )
if do_resize:
A__ = self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase )
if do_center_crop:
A__ = self.center_crop(_lowerCamelCase , size=_lowerCamelCase )
if do_rescale:
A__ = self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase , offset=_lowerCamelCase )
if do_normalize:
A__ = self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase )
A__ = to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase )
return image
def A__ ( self : Any , _lowerCamelCase : ImageInput , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : bool = None , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : bool = None , _lowerCamelCase : float = None , _lowerCamelCase : bool = None , _lowerCamelCase : bool = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase : List[str] , ):
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(_lowerCamelCase , param_name='''crop_size''' )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
A__ = make_batched(_lowerCamelCase )
A__ = [
[
self._preprocess_image(
image=_lowerCamelCase , do_resize=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , do_center_crop=_lowerCamelCase , crop_size=_lowerCamelCase , do_rescale=_lowerCamelCase , rescale_factor=_lowerCamelCase , offset=_lowerCamelCase , do_normalize=_lowerCamelCase , image_mean=_lowerCamelCase , image_std=_lowerCamelCase , data_format=_lowerCamelCase , )
for img in video
]
for video in videos
]
A__ = {'''pixel_values''': videos}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
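# Usage sketch (kept commented out; it assumes the standard
# `BaseImageProcessor.preprocess` entry point, while the method names above
# are obfuscated): an 8-frame random clip would be resized to shortest edge
# 256, center-cropped to 224x224, rescaled and normalized, yielding a
# (1, 8, 3, 224, 224) `pixel_values` batch.
# processor = UpperCamelCase()
# clip = [np.random.randint(0, 256, (320, 480, 3), dtype=np.uint8) for _ in range(8)]
# batch = processor.preprocess(clip, return_tensors="np")
# assert batch["pixel_values"].shape == (1, 8, 3, 224, 224)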
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function ( z ):
    return 1 / (1 + np.exp(-z ))
def cost_function ( h , y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood ( x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg ( alpha , x , y , max_iterations=7_0000 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
    def predict_prob(x ):
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
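# Prediction sketch (illustrative, reusing the `theta` fitted above): a point
# is assigned class 1 once the predicted probability crosses 0.5.
# sample = x[0]                        # one (sepal length, sepal width) pair
# probability = predict_prob(sample)   # sigmoid(sample . theta)
# predicted_class = int(probability >= 0.5)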
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_UpperCamelCase : str = None
try:
import msvcrt
except ImportError:
_UpperCamelCase : Union[str, Any] = None
try:
import fcntl
except ImportError:
_UpperCamelCase : Any = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
_UpperCamelCase : List[str] = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_UpperCamelCase : List[str] = "3.0.12"
_UpperCamelCase : Union[str, Any] = None
def snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
global _logger
lowerCAmelCase = _logger or logging.getLogger(__name__ )
return _logger
class Timeout ( TimeoutError ):
def __init__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = lock_file
return None
def __str__( self ):
'''simple docstring'''
lowerCAmelCase = F'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class _Acquire_ReturnProxy :
def __init__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = lock
return None
def __enter__( self ):
'''simple docstring'''
return self.lock
def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.lock.release()
return None
class BaseFileLock :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
lowerCAmelCase = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
lowerCAmelCase = self.hash_filename_if_too_long(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# The path to the lock file.
lowerCAmelCase = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowerCAmelCase = None
# The default timeout value.
lowerCAmelCase = timeout
# We use this lock primarily for the lock counter.
lowerCAmelCase = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowerCAmelCase = 0
return None
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self._lock_file
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = float(_SCREAMING_SNAKE_CASE )
return None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
raise NotImplementedError()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
raise NotImplementedError()
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return self._lock_file_fd is not None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.05 ):
'''simple docstring'''
if timeout is None:
lowerCAmelCase = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowerCAmelCase = id(self )
lowerCAmelCase = self._lock_file
lowerCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(F'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(_SCREAMING_SNAKE_CASE )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowerCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowerCAmelCase = id(self )
lowerCAmelCase = self._lock_file
logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
lowerCAmelCase = 0
logger().debug(F'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self ):
'''simple docstring'''
self.acquire()
return self
def __exit__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.release()
return None
def __del__( self ):
'''simple docstring'''
self.release(force=_SCREAMING_SNAKE_CASE )
return None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = os.path.basename(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > max_length and max_length > 0:
lowerCAmelCase = os.path.dirname(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = str(hash(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = filename[: max_length - len(_SCREAMING_SNAKE_CASE ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
return path
class WindowsFileLock ( BaseFileLock ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , max_filename_length=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowerCAmelCase = os.open(self._lock_file , _SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
try:
msvcrt.locking(_SCREAMING_SNAKE_CASE , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = fd
return None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self._lock_file_fd
lowerCAmelCase = None
msvcrt.locking(_SCREAMING_SNAKE_CASE , msvcrt.LK_UNLCK , 1 )
os.close(_SCREAMING_SNAKE_CASE )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
lowerCAmelCase = os.statvfs(os.path.dirname(_SCREAMING_SNAKE_CASE ) ).f_namemax
super().__init__(_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , max_filename_length=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowerCAmelCase = os.open(self._lock_file , _SCREAMING_SNAKE_CASE )
try:
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = fd
return None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self._lock_file_fd
lowerCAmelCase = None
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
os.close(_SCREAMING_SNAKE_CASE )
return None
class SoftFileLock ( BaseFileLock ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowerCAmelCase = os.open(self._lock_file , _SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
lowerCAmelCase = fd
return None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
os.close(self._lock_file_fd )
lowerCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class Conversation :
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if not conversation_id:
lowerCAmelCase = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase = []
if generated_responses is None:
lowerCAmelCase = []
lowerCAmelCase = conversation_id
lowerCAmelCase = past_user_inputs
lowerCAmelCase = generated_responses
lowerCAmelCase = text
def __eq__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
lowerCAmelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
lowerCAmelCase = text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase = None
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
self.generated_responses.append(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
lowerCAmelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
lowerCAmelCase = 'user' if is_user else 'bot'
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
a_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline ( Pipeline ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase = self.tokenizer.eos_token
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = {}
lowerCAmelCase = {}
lowerCAmelCase = {}
if min_length_for_response is not None:
lowerCAmelCase = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_SCREAMING_SNAKE_CASE )
return preprocess_params, forward_params, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = super().__call__(_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=32 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            raise ValueError('ConversationalPipeline expects Conversation objects as input' )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
lowerCAmelCase = self.tokenizer._build_conversation_input_ids(_SCREAMING_SNAKE_CASE )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase = self._legacy_parse_and_tokenize(_SCREAMING_SNAKE_CASE )
if self.framework == "pt":
lowerCAmelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=10 , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
lowerCAmelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
lowerCAmelCase = max_length - minimum_tokens
lowerCAmelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase = model_inputs['attention_mask'][:, -trim:]
lowerCAmelCase = model_inputs.pop('conversation' )
lowerCAmelCase = max_length
lowerCAmelCase = self.model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.model.config.is_encoder_decoder:
lowerCAmelCase = 1
else:
lowerCAmelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
lowerCAmelCase = model_outputs['output_ids']
lowerCAmelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(_SCREAMING_SNAKE_CASE )
return conversation
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = self.tokenizer.eos_token_id
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > self.tokenizer.model_max_length:
lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
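# Usage sketch (kept commented out because it downloads model weights; the
# checkpoint name is an illustrative assumption, not part of this module):
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
# conversation = Conversation("What's the best way to learn Python?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])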
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def convert_bort_checkpoint_to_pytorch ( bort_checkpoint_path , pytorch_dump_folder_path ) -> None:
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
A__ = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=_A , output_all_encodings=_A , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , _A ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
A__ = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
A__ = os.path.join(get_home_dir() , 'models' )
A__ = _load_vocab(_A , _A , _A , cls=_A )
A__ = nlp.model.BERTModel(
_A , len(_A ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=_A , use_token_type_embed=_A , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=_A , use_decoder=_A , )
original_bort.load_parameters(_A , cast_dtype=_A , ignore_extra=_A )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_model_config_dict = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(_A ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_model_config_dict )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
A__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
A__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
A__ = hf_bort_model.bert.encoder.layer[i]
# self attention
A__ = layer.attention.self
A__ = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
A__ = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
A__ = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
A__ = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
A__ = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
A__ = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
A__ = layer.attention.output
A__ = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
A__ = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
A__ = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
A__ = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
A__ = layer.intermediate
A__ = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
A__ = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
A__ = layer.output
A__ = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
A__ = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
A__ = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
A__ = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
A__ = RobertaTokenizer.from_pretrained('roberta-base' )
A__ = tokenizer.encode_plus(_A )['input_ids']
# Get gluon output
A__ = mx.nd.array([input_ids] )
A__ = original_bort(inputs=_A , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_A )
A__ = BertModel.from_pretrained(_A )
hf_bort_model.eval()
A__ = tokenizer.encode_plus(_A , return_tensors='pt' )
A__ = hf_bort_model(**_A )[0]
A__ = output_gluon[0].asnumpy()
A__ = output_hf[0].detach().numpy()
A__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
A__ = np.allclose(_A , _A , atol=1E-3 )
if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ The models do **NOT** output the same tensors' )
print('Absolute difference is:' , _A )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
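# Invocation sketch (script and file names are hypothetical placeholders):
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort_4_8_768_1024.params \
#       --pytorch_dump_folder_path ./bort-pytorch
# The dumped folder can then be reloaded with BertModel.from_pretrained, as
# the comparison step above already demonstrates.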
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = """T5Config"""
class TFMTaModel ( TFTaModel ):
    model_type = """mt5"""
    config_class = MTaConfig
class TFMTaForConditionalGeneration ( TFTaForConditionalGeneration ):
    model_type = """mt5"""
    config_class = MTaConfig
class TFMTaEncoderModel ( TFTaEncoderModel ):
    model_type = """mt5"""
    config_class = MTaConfig
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
def convert_dialogpt_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
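# Invocation sketch (directory layout is an assumption): with the three
# `{size}_ft.pkl` checkpoints in ./checkpoints, running
#   python this_script.py --dialogpt_path ./checkpoints
# writes ./DialoGPT-small, ./DialoGPT-medium and ./DialoGPT-large, each
# containing only the remapped state dict (a matching config.json must be
# added alongside before from_pretrained can load the folder).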
"""simple docstring"""
def _snake_case ( x_points : list , y_points : list , xa : int ):
    # Neville's iterated interpolation: evaluates the interpolating
    # polynomial through (x_points, y_points) at the point xa.
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
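# Worked example (added for illustration; `_snake_case` is the Neville
# routine above): the points lie on the line y = x + 5, so evaluating the
# interpolant at x = 5 must give exactly 10.0.
def _neville_demo() -> None:
    value, _table = _snake_case([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10.0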
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCAmelCase : int = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCAmelCase : Optional[int] = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCAmelCase : List[str] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate ( item , main_target ):
    """simple docstring"""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover ( parent_a , parent_b ):
    """simple docstring"""
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate ( child , genes ):
    """simple docstring"""
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select ( parent_a , population_score , genes ):
    """simple docstring"""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_00 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a, child_b = crossover(parent_a[0] , parent_b )
        # Append new strings to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic ( target , genes , debug = True ):
    """simple docstring"""
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
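# Sanity checks for the two genetic operators (added for illustration, not
# part of the algorithm): both preserve the string length.
def _operators_demo() -> None:
    child_a, child_b = crossover("aaaa", "bbbb")
    assert len(child_a) == 4 and len(child_b) == 4
    mutated = mutate("aaaa", ["a", "b"])
    assert len(mutated) == 4  # mutation replaces one gene, never the length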
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = '''mock-s3-bucket'''
UpperCAmelCase_ = F"""s3://{mock_bucket}"""
UpperCAmelCase_ = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
UpperCAmelCase_ = '''./local/path'''
UpperCAmelCase_ = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def __lowerCamelCase ( _UpperCamelCase : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
UpperCAmelCase_ = fsspec.filesystem('''file''' )
UpperCAmelCase_ = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , COMPRESSION_FILESYSTEMS )
def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
UpperCAmelCase_ = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_ = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
UpperCAmelCase_ = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = os.path.basename(_UpperCamelCase )
UpperCAmelCase_ = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
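# Illustrative sketch (not part of the test suite): the compression and chained
# filesystems exercised above can also be driven directly through fsspec.open;
# "dataset.jsonl.gz" is a made-up example path.
#
#     import fsspec
#     with fsspec.open("gzip://dataset.jsonl::dataset.jsonl.gz", "rt", encoding="utf-8") as f:
#         first_record = f.readline()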
| 390
| 0
|
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental: every call emits a UserWarning."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
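# A minimal usage sketch of the decorator above; `new_split_api` is a made-up
# function name for illustration only.
#
#     @experimental
#     def new_split_api(ds):
#         ...
#
#     new_split_api(ds)  # emits: UserWarning: 'new_split_api' is experimental ...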
| 482
|
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """
    Convert a 3d point (x, y, z) to a 2d drawable point using a simple
    perspective projection with the given viewing distance and scale.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """
    Rotate a point around a certain axis ('x', 'y' or 'z') by a certain angle
    given in degrees.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 482
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 534
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
| 228
| 0
|
_lowerCamelCase : Union[str, Any] = "Alexander Joslin"
import operator as op
from .stack import Stack
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : Any = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
_lowerCAmelCase : Stack[int] = Stack()
_lowerCAmelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(UpperCamelCase_ ) )
elif i in operators:
# RULE 2
operator_stack.push(UpperCamelCase_ )
elif i == ")":
# RULE 4
_lowerCAmelCase : int = operator_stack.peek()
operator_stack.pop()
_lowerCAmelCase : List[str] = operand_stack.peek()
operand_stack.pop()
_lowerCAmelCase : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
_lowerCAmelCase : Dict = operators[opr](UpperCamelCase_ , UpperCamelCase_ )
operand_stack.push(UpperCamelCase_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_lowerCamelCase : str = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
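    # A second illustrative evaluation with the same routine; note that every
    # binary operation must be parenthesised and operands are single digits.
    second_equation = "((1 + 2) * 3)"
    print(f"{second_equation} = {dijkstras_two_stack_algorithm(second_equation)}")  # 9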
| 196
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
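    # Also run the assertion-based checks above; doctest.testmod() alone does
    # not execute test_circular_linked_list().
    test_circular_linked_list()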
| 196
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
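# Example invocations routed through this entry point (run from a shell):
#   accelerate config                              # interactive configuration
#   accelerate env                                 # report the current environment
#   accelerate launch train.py --num_processes 2   # launch a script on 2 processes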
| 679
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
    tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """
        `str`: mask token. Log an error and return None if used while not having been set.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
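# Illustrative usage sketch (not part of this module): the `_pad` override above
# keeps an optional "global_attention_mask" aligned with "input_ids" when a
# batch is padded, filling the new positions with -1 (local attention).
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     batch = tokenizer(["short text", "a somewhat longer text"], padding=True)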
| 679
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BeitFeatureExtractor"""]
lowerCamelCase__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""DPTFeatureExtractor"""]
lowerCamelCase__ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
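# Minimal instantiation sketch using the defaults spelled out in the signature
# above; the derived hidden_size is embed_dim * 2 ** (len(depths) - 1).
#
#     config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#     assert config.hidden_size == 768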
| 363
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_UpperCamelCase = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_UpperCamelCase = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 363
| 1
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic.
        Return True if the merge happened, False if they were already joined.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the root of a given set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
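if __name__ == "__main__":
    # Quick usage sketch: three sets of sizes 1, 1 and 2. Merging the first two
    # gives elements 0 and 1 a common root and a largest set of size 2.
    disjoint_set = DisjointSet([1, 1, 2])
    assert disjoint_set.merge(0, 1) is True
    assert disjoint_set.merge(0, 1) is False  # already in the same set
    print(disjoint_set.max_set)  # 2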
| 99
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __magic_name__ ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = noise_time
if not torch.is_tensor(UpperCamelCase__):
snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0:
snake_case__ = timesteps[None].to(input_tokens.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
snake_case__ = self.decoder(
encodings_and_masks=UpperCamelCase__ , decoder_input_tokens=UpperCamelCase__ , decoder_noise_time=UpperCamelCase__)
return logits
@torch.no_grad()
def __call__( self : Dict , UpperCamelCase__ : List[List[int]] , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : int = 1_0_0 , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "numpy" , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCamelCase__)}.''')
snake_case__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
snake_case__ = np.zeros([1, 0, self.n_dims] , np.floataa)
snake_case__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device)
for i, encoder_input_tokens in enumerate(UpperCamelCase__):
if i == 0:
snake_case__ = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
snake_case__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase__ , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
snake_case__ = ones
snake_case__ = self.scale_features(
UpperCamelCase__ , output_range=[-1.0, 1.0] , clip=UpperCamelCase__)
snake_case__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=UpperCamelCase__ , continuous_mask=UpperCamelCase__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
snake_case__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCamelCase__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCamelCase__)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
snake_case__ = self.decode(
encodings_and_masks=UpperCamelCase__ , input_tokens=UpperCamelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
snake_case__ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample
snake_case__ = self.scale_to_features(UpperCamelCase__ , input_range=[-1.0, 1.0])
snake_case__ = mel[:1]
snake_case__ = mel.cpu().float().numpy()
snake_case__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase__ , UpperCamelCase__)
logger.info("""Generated segment""" , UpperCamelCase__)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""")
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""")
if output_type == "numpy":
snake_case__ = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
snake_case__ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCamelCase__)
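# A minimal usage sketch (hedged: the checkpoint id is illustrative and a
# MIDI-to-token processor is assumed to exist upstream; verify both before
# relying on this):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#   output = pipe(input_tokens, num_inference_steps=100)  # input_tokens: List[List[int]]
#   audio = output.audios[0]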
| 99
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
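# A sketch of the effect of this lazy-init pattern (the exports named below
# are real; the point is the deferred import):
#
#   import transformers.models.megatron_bert as mb  # cheap: no torch-backed module loaded yet
#   mb.MegatronBertConfig                           # first attribute access triggers the real import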
| 208
|
import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a COCO fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
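# To run these tests from a `transformers` source checkout (sketch; RUN_SLOW
# gates the integration tests that download checkpoints):
#
#   RUN_SLOW=1 python -m pytest tests/models/segformer/test_modeling_segformer.py -q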
| 9
| 0
|
"""A collection of basic prime-number and number-theory utilities."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status
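# Quick sanity checks (minimal usage sketch):
#
#   assert is_prime(2) and is_prime(13)
#   assert not is_prime(1) and not is_prime(15)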
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes between 2 and n (inclusive), via trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of 'number' as a list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans
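# Minimal usage sketch:
#
#   assert get_prime_numbers(10) == [2, 3, 5, 7]
#   assert prime_factorization(60) == [2, 2, 3, 5]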
def greatest_prime_factor(number: int) -> int:
    """Return the greatest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of 'number' (number >= 0)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True if 'number' is even."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if 'number' is odd."""
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Goldbach's assumption: return two primes whose sum equals the even number > 2."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes. And sum of elements must be eq 'number'"

    return ans
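# Minimal usage sketch:
#
#   assert sum(goldbach(28)) == 28          # e.g. [5, 23]
#   assert greatest_prime_factor(84) == 7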
def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two integers >= 1, built from their prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans
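# Minimal usage sketch:
#
#   assert gcd(24, 36) == 12
#   assert kg_v(24, 36) == 72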
def get_prime(n: int) -> int:
    """Return the n-th prime number, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the two given primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of n, including 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if 'number' equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must be a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number under the convention fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
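# Minimal usage sketch:
#
#   assert factorial(5) == 120
#   assert fib(4) == 5                       # sequence: 1, 1, 2, 3, 5, ...
#   assert simplify_fraction(10, 20) == (1, 2)
#   assert is_perfect_number(28)             # 1 + 2 + 4 + 7 + 14 == 28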
| 331
|
"""Scrape email addresses that belong to a URL's domain."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect the href target of every anchor tag into self.urls."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return only the second-level domain of the URL, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Fetch the page, follow one level of its links, and collect on-domain emails."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 331
| 1
|
"""Backtracking sudoku solver."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that n can be placed at (row, column) without breaking sudoku rules."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return the grid, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 374
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path

DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 374
| 1
|
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
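# A minimal usage sketch (the checkpoint id is illustrative; "BAAI/AltCLIP" is
# the usual hub id for this model family, but verify before relying on it):
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")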
| 721
|
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    """Quantize a model with bitsandbytes, optionally loading weights and dispatching across devices."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` (except skipped ones) with the matching bitsandbytes layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    """Private recursive helper; returns the model and whether any layer was replaced."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    """Find module names (tied weights, output head) that should stay in their original dtype."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
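# A minimal usage sketch (hedged: model class and weights path are
# placeholders; the entry points named are accelerate's public API):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       model = MyModel()  # hypothetical model class, instantiated on the meta device
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = load_and_quantize_model(
#       model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )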
| 593
| 0
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
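
# Hedged usage sketch (not part of the test suite): reading a local text file
# into a Dataset outside pytest. The "data.txt" path is a placeholder.
#
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader("data.txt", cache_dir="./cache").read()
#   print(ds.column_names)  # -> ["text"]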
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input.
        Under the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape

if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architectures,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding strongly connected components in a directed
    graph given as an adjacency list.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
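
# Note: Tarjan's algorithm visits each vertex and edge once, so it runs in
# O(V + E). `strong_connect` is recursive; very deep graphs can exceed
# Python's default recursion limit, in which case an iterative rewrite (or a
# higher `sys.setrecursionlimit`) is needed.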
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """
    subreddit : subreddit to query
    limit : number of posts to fetch
    age : one of "new", "top", "hot"
    wanted_data : fetch only the listed fields of each post
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
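
# Note: unauthenticated requests to reddit.com are rate limited aggressively
# (hence the explicit HTTP 429 check above); waiting before retrying, or using
# the authenticated OAuth API, avoids most failures.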
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 5_12,
"""distilbert-base-uncased-distilled-squad""": 5_12,
"""distilbert-base-cased""": 5_12,
"""distilbert-base-cased-distilled-squad""": 5_12,
"""distilbert-base-german-cased""": 5_12,
"""distilbert-base-multilingual-cased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
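
# Hedged usage sketch (requires access to the Hugging Face Hub):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tokenizer("Hello world")
#   enc["input_ids"]  # token ids, including the [CLS]/[SEP] specials added above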
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the numeric string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits of `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
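
# Illustrative cross-check (assumes the greedy window skip above is exact):
# a brute-force scan over every 13-digit window should agree.
#
#   assert solution() == max(str_eval(N[i : i + 13]) for i in range(len(N) - 12))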
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
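
# Example invocation (all paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin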
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    """Remove duplicate alphabetic characters from a keyword (keeping spaces)."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    """Return a cipher map given a keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    """Enciphers a message given a cipher map."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    """Deciphers a message given a cipher map."""
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handle I/O."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
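
# Illustrative round-trip (values taken from this algorithm's usual doctest):
#
#   cipher_map = create_cipher_map("Goodbye!!")
#   encipher("Hello World!!", cipher_map)   # -> "CYJJM VMQJB!!"
#   decipher("CYJJM VMQJB!!", cipher_map)   # -> "HELLO WORLD!!"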
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=True,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = v . v"""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier"""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel used at all)"""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize"""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
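
# Minimal usage sketch (toy, linearly separable data; illustrative only):
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC()
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0, 1]))  # -> 1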
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
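
# Example: "AB" -> (1 * 26**1) + (2 * 26**0) = 28; each letter acts as a
# base-26 digit with 'A' mapped to 1 via ord(letter) - 64.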
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
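
# Note: odd-even transposition (brick) sort is the parallel-friendly variant of
# bubble sort; each even/odd sweep compares disjoint pairs, so a sweep can be
# executed in parallel. Sequential worst-case cost remains O(n^2).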
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def couloumbs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * |q1 * q2| / d^2: given any three of force,
    charge1, charge2 and distance (with the unknown passed as 0), solve for
    the remaining quantity.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
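
# Worked example: two 1 C charges held 1 m apart give
# F = k * |q1 * q2| / d**2 = 8.988e9 * 1 / 1 = 8.988e9 N, i.e.
#   couloumbs_law(force=0, charge1=1, charge2=1, distance=1)
#   -> {"force": 8988000000.0}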
if __name__ == "__main__":
import doctest
doctest.testmod()
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = 0
return t
def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
"""simple docstring"""
UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCAmelCase_ = MaskFormerSwinConfig
def A_ ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self )
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase )
backbone.to(_UpperCAmelCase )
backbone.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCamelCase : Any = True
except ImportError:
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
F"{directory}/__init__.py" , F"{model_dir}/__init__.py" , )
shutil.move(
F"{directory}/configuration_{lowercase_model_name}.py" , F"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(path ):
with open(path , '''r''' ) as f:
lines = f.readlines()
with open(path , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_{lowercase_model_name}.py" , F"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_tf_{lowercase_model_name}.py" , F"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_tf_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/modeling_flax_{lowercase_model_name}.py" , F"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/test_modeling_flax_{lowercase_model_name}.py" , F"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(F"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(F"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
F"{directory}/{lowercase_model_name}.md" , F"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
F"{directory}/tokenization_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
F"{directory}/tokenization_fast_{lowercase_model_name}.py" , F"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(original_file : str , line_to_copy_below : str , lines_to_copy : List[str] ):
# Create temp file
fh , abs_path = mkstemp()
line_found = False
with fdopen(fh , '''w''' ) as new_file:
with open(original_file ) as old_file:
for line in old_file:
new_file.write(line )
if line_to_copy_below in line:
line_found = True
for line_to_copy in lines_to_copy:
new_file.write(line_to_copy )
if not line_found:
raise ValueError(F"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(original_file , abs_path )
# Remove original file
remove(original_file )
# Move new file
move(abs_path , original_file )
def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(path_to_datafile ):
with open(path_to_datafile ) as datafile:
lines_to_copy = []
skip_file = False
skip_snippet = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
file_to_replace_in = line.split('''"''' )[1]
skip_file = skip_units(line )
elif "# Below: " in line and "##" not in line:
line_to_copy_below = line.split('''"''' )[1]
skip_snippet = skip_units(line )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
lines_to_copy = []
elif "# Replace with" in line and "##" not in line:
lines_to_copy = []
elif "##" not in line:
lines_to_copy.append(line )
remove(path_to_datafile )
replace_in_files(F"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(directory )
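# Added note (not from the original file): the `to_replace_*.py` file consumed by
# `replace_in_files` above is a cookiecutter artifact whose marker format is sketched
# here; the file name and anchor line below are purely illustrative.
#
#   # To replace in: "src/transformers/models/auto/configuration_auto.py"
#   # Below: "# Add configs here"
#   ("brand_new_model", "BrandNewModelConfig"),
#   # End.
#
# Everything between `# Below:` and `# End.` is copied right after the anchor line,
# unless `skip_units` decides the `generating PyTorch/TensorFlow/Flax` unit is skipped.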
| 106
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__UpperCamelCase : Dict = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__UpperCamelCase : Optional[int] = "main"
# Default branch name
__UpperCamelCase : Optional[Any] = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__UpperCamelCase : str = "aaaaaaa"
# This commit does not exist, so we should 404.
__UpperCamelCase : Optional[Any] = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__UpperCamelCase : Tuple = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def context_fr():
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class __magic_name__ ( unittest.TestCase):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : Union[str, Any] , mock_stdout : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : Any , mock_stdout : Union[str, Any] ) -> Dict:
'''simple docstring'''
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : List[Any] , mock_stdout : List[str] ) -> Tuple:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(find_labels(BertForSequenceClassification ) , ['''labels'''] )
self.assertEqual(find_labels(BertForPreTraining ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(BertForQuestionAnswering ) , ['''start_positions''', '''end_positions'''] )
class __magic_name__ ( BertForSequenceClassification):
pass
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
@require_tf
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['''labels'''] )
self.assertEqual(find_labels(TFBertForPreTraining ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['''start_positions''', '''end_positions'''] )
class __magic_name__ ( TFBertForSequenceClassification):
pass
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
@require_flax
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class __magic_name__ ( FlaxBertForSequenceClassification):
pass
self.assertEqual(find_labels(__magic_name__ ) , [] )
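# Illustrative sketch (added): `find_labels` works by inspecting the signature of the
# model's forward/call method. The hypothetical helper below mimics the idea; it is
# not the transformers implementation.
import inspect

def find_labels_sketch(model_class):
    signature = inspect.signature(model_class.forward)  # `call` for TF, `__call__` for Flax
    return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]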
| 106
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__lowercase : str = ["""small""", """medium""", """large"""]
__lowercase : Optional[Any] = """lm_head.decoder.weight"""
__lowercase : Optional[int] = """lm_head.weight"""
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
__a : Union[str, Any] = torch.load(__lowerCamelCase )
__a : Union[str, Any] = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
__lowercase : int = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
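# Example invocation (added; script name and paths are hypothetical):
#   python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt
# Each `{size}_ft.pkl` checkpoint is rewritten with `lm_head.decoder.weight` renamed
# to `lm_head.weight` and saved under ./DialoGPT-{size}/.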
| 476
|
ENERGY_CONVERSION : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355_818,
}
def energy_conversion ( from_type : str ,to_type : str ,value : float ) -> float:
'''simple docstring'''
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
msg = (
F'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
F'Valid values are: {", ".join(ENERGY_CONVERSION )}'
)
raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
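# Spot checks (added, illustrative): conversion is value * E[from_type] / E[to_type].
assert energy_conversion("joule", "kilojoule", 1000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3600000.0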
| 344
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
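# For intuition (added sketch, not the real transformers `_LazyModule`): a lazy module
# defers the heavy submodule imports until first attribute access, roughly like this.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Only import the submodule that actually defines the requested name.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")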
| 715
|
'''simple docstring'''
class SubArray :
def __init__( self , arr ):
# we need a list not a string, so do something to change the type
self.array = arr.split("," )
def solve_sub_array ( self ):
rear = [int(self.array[0] )] * len(self.array )
sum_value = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
sum_value[i] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
rear[i] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
whole_array = input("please input some numbers:")
array = SubArray(whole_array)
re = array.solve_sub_array()
print(("the results is:", re))
| 276
| 0
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCAmelCase_ : List[str] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split(""".""" ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights ( fairseq_model , hf_model ):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split(""".""" )[-2]
mapped_key = mapped_key.replace("""*""" , layer_index )
if "weight_g" in name:
weight_type = """weight_g"""
elif "weight_v" in name:
weight_type = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
weight_type = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = """weight"""
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
name = full_name.split("""conv_layers.""" )[-1]
items = name.split(""".""" )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None ):
# load the pre-trained checkpoints
checkpoint = torch.load(checkpoint_path )
cfg = WavLMConfigOrig(checkpoint["""cfg"""] )
model = WavLMOrig(cfg )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
config = WavLMConfig.from_pretrained(config_path )
else:
config = WavLMConfig()
hf_wavlm = WavLMModel(config )
recursively_load_weights(model , hf_wavlm )
hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase_ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
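# Example invocation (added; paths are hypothetical):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base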
| 414
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class __lowerCAmelCase ( __a ):
snake_case : Tuple = """lilt"""
def __init__(self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=None , lowerCAmelCase__=4 , lowerCAmelCase__=1_0_2_4 , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : int = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : Optional[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : int = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : List[Any] = type_vocab_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : List[Any] = position_embedding_type
_UpperCAmelCase : Optional[int] = classifier_dropout
_UpperCAmelCase : Optional[Any] = channel_shrink_ratio
_UpperCAmelCase : Tuple = max_ad_position_embeddings
| 414
| 1
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
UpperCamelCase_ = False
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def get_model_optimizer ( self , resolution=32 ):
set_seed(0 )
model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def __lowerCamelCase ( self : Optional[int] ) -> List[Any]:
device = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
ddpm_scheduler = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
ddim_scheduler = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
timesteps = [torch.randint(0 , 10_00 , (4,) ).long().to(device ) for _ in range(4 )]
# train with a DDPM scheduler
model , optimizer = self.get_model_optimizer(resolution=32 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
model , optimizer = self.get_model_optimizer(resolution=32 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1e-5 ) )
self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1e-5 ) )
| 320
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export ( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=True , exist_ok=True )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
else:
export(
model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models ( model_path : str , output_path : str , opset : int , fpaa : bool = False ):
'''simple docstring'''
dtype = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
device = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
device = 'cpu'
output_path = Path(output_path )
# VAE DECODER
vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
vae_latent_channels = vae_decoder.config.latent_channels
# forward only through the decoder part
vae_decoder.forward = vae_decoder.decode
onnx_export(
vae_decoder , model_args=(
torch.randn(1 , vae_latent_channels , 2_5 , 2_5 ).to(device=device , dtype=dtype ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=opset , )
del vae_decoder
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
UpperCamelCase_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
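# Example invocation (added; script name and paths are hypothetical):
#   python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14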
| 320
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def dummy_uncond_unet ( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
unet = self.dummy_uncond_unet
scheduler = ScoreSdeVeScheduler()
sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=generator ).images
generator = torch.manual_seed(0 )
image_from_tuple = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=generator , return_dict=False )[
0
]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
model_id = '''google/ncsnpp-church-256'''
model = UNetaDModel.from_pretrained(model_id )
scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=generator ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 142
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles : float , kelvin : float , volume : float ):
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles : float , kelvin : float , pressure : float ):
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
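# Spot check (added, illustrative): 1 mol at 273.15 K in 1 m^3 gives
# P = nRT/V = 1 * 8.314462 * 273.15 / 1 ≈ 2271.1 Pa.
assert abs(pressure_of_gas_system(1.0, 273.15, 1.0) - 2271.1) < 0.1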
| 142
| 1
|
def nand_gate ( input_1 : int , input_2 : int ) -> int:
return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate ( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 700
|
def find_minimum_change ( denominations : list[int] , value : str ) -> list[int]:
total_value = int(value )
# Initialize Result
answer = []
# Traverse through all denomination
for denomination in reversed(denominations ):
# Find denominations
while int(total_value ) >= int(denomination ):
total_value -= int(denomination )
answer.append(denomination ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
model_type = 'data2vec-vision'
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.out_indices = out_indices
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
__magic_name__ : Optional[int] = version.parse('1.11' )
@property
def lowerCamelCase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase__ ( self : List[Any] ) -> float:
"""simple docstring"""
return 1E-4
| 279
|
def decimal_isolate (number : float , digit_amount : int ) -> float:
if digit_amount > 0:
return round(number - int(number ) , digit_amount )
return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
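# Added note: `decimal_isolate` returns only the fractional part, optionally rounded,
# e.g. decimal_isolate(35.345, 1) == 0.3; the sign is preserved for negative inputs.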
| 279
| 1
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_A )
class _lowercase ( _A ):
def __init__( self , **a ):
super().__init__(**a )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(a )
def _sanitize_parameters ( self , **kwargs ):
preprocess_kwargs = {}
forward_params = {}
postprocess_kwargs = {}
# preprocess args
if "points_per_batch" in kwargs:
preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
def preprocess ( self , image , points_per_batch=6_4 , crops_n_layers = 0 , crop_overlap_ratio = 5_1_2 / 1_5_0_0 , points_per_crop = 3_2 , crop_n_points_downscale_factor = 1 , ):
image = load_image(image )
target_size = self.image_processor.size["""longest_edge"""]
crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
model_inputs = self.image_processor(images=cropped_images , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
inference_context = self.get_inference_context()
with inference_context():
model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
model_inputs["""image_embeddings"""] = image_embeddings
n_points = grid_points.shape[1]
points_per_batch = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , n_points , points_per_batch ):
batched_points = grid_points[:, i : i + points_per_batch, :, :]
labels = input_labels[:, i : i + points_per_batch]
is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _forward ( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
input_boxes = model_inputs.pop("""input_boxes""" )
is_last = model_inputs.pop("""is_last""" )
original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
model_outputs = self.model(**model_inputs )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
low_resolution_masks = model_outputs["""pred_masks"""]
masks = self.image_processor.post_process_masks(
low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
iou_scores = model_outputs["""iou_scores"""]
masks , iou_scores , boxes = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def postprocess ( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
all_scores = []
all_masks = []
all_boxes = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
all_scores = torch.cat(all_scores )
all_boxes = torch.cat(all_boxes )
output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
all_masks , all_scores , all_boxes , crops_nms_thresh )
extra = defaultdict(list )
for output in model_outputs:
for k, v in output.items():
extra[k].append(v )
optional = {}
if output_rle_mask:
optional["""rle_mask"""] = rle_mask
if output_bboxes_mask:
optional["""bounding_boxes"""] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 448
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCamelCase : Dict = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__lowerCamelCase : Optional[int] = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__lowerCamelCase : List[str] = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
def _info ( self ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def _compute ( self , predictions , references , char_order = CHRF.CHAR_ORDER , word_order = CHRF.WORD_ORDER , beta = CHRF.BETA , lowercase = False , whitespace = False , eps_smoothing = False , ):
references_per_prediction = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 448
| 1
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( state_dict):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict ( state_dict , codebook_state_dict):
upgrade = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head")
key = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head")
key = key.replace("heads.cmd.itm_head.cls" , "itm_head")
key = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler")
key = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale")
key = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head")
key = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head")
key = key.replace("mm_text_projection" , "flava.text_to_mm_projection")
key = key.replace("mm_image_projection" , "flava.image_to_mm_projection")
key = key.replace("image_encoder.module" , "flava.image_model")
key = key.replace("text_encoder.module" , "flava.text_model")
key = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token")
key = key.replace("mm_encoder.module" , "flava.multimodal_model")
key = key.replace("text_projection" , "flava.text_projection")
key = key.replace("image_projection" , "flava.image_projection")
upgrade[key] = value.float()
for key, value in codebook_state_dict.items():
upgrade[f"image_codebook.{key}"] = value
return upgrade
@torch.no_grad()
def convert_flava_checkpoint ( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None):
if config_path is not None:
config = FlavaConfig.from_pretrained(config_path)
else:
config = FlavaConfig()
hf_model = FlavaForPreTraining(config).eval()
codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False)
if os.path.exists(checkpoint_path):
state_dict = torch.load(checkpoint_path , map_location="cpu")
else:
state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu")
hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict)
hf_model.load_state_dict(hf_state_dict)
hf_state_dict = hf_model.state_dict()
hf_count = count_parameters(hf_state_dict)
state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
assert torch.allclose(hf_count , state_dict_count , atol=1E-3)
hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
a_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
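# Example invocation (added; paths are hypothetical):
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava_full.pt --codebook_path ./flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-full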
| 25
|
import logging
from transformers.configuration_utils import PretrainedConfig
A__ : Tuple = logging.getLogger(__name__)
class lowercase ( __UpperCamelCase ):
__a = """masked_bert"""
def __init__( self , SCREAMING_SNAKE_CASE__=30522 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="topK" , SCREAMING_SNAKE_CASE__="constant" , SCREAMING_SNAKE_CASE__=0.0 , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : List[str] = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : List[Any] = layer_norm_eps
lowerCAmelCase__ : List[str] = pruning_method
lowerCAmelCase__ : List[Any] = mask_init
lowerCAmelCase__ : Dict = mask_scale
| 233
| 0
|
"""simple docstring"""
def generate_large_matrix ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid ( grid ) -> None:
"""simple docstring"""
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index ( array ) -> int:
"""simple docstring"""
left = 0
right = len(array ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
mid = (left + right) // 2
num = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
left = mid + 1
else:
right = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(array )
def count_negatives_binary_search ( grid ) -> int:
"""simple docstring"""
total = 0
bound = len(grid[0] )
for i in range(len(grid ) ):
bound = find_negative_index(grid[i][:bound] )
total += bound
return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number in the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
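# Quick sanity sketch (grid taken from test_grids above): [[7, 7, 6], [-1, -2, -3]]
# contains exactly three negatives, so all three counters should agree.
def _demo_counts() -> None:
    sample = [[7, 7, 6], [-1, -2, -3]]
    assert count_negatives_binary_search(sample) == 3
    assert count_negatives_brute_force(sample) == 3
    assert count_negatives_brute_force_with_break(sample) == 3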
def benchmark() -> None:
    """Time each counting implementation against the large generated grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 614
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
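# Usage sketch: with this lazy layout nothing heavy is imported up front; the
# first attribute access triggers the submodule load, e.g.
#   from transformers import GroupViTModel  # loads modeling_groupvit on demand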
| 614
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 528
| 0
|
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
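# Expected behaviour (illustrative values):
#   mean([3, 6, 9])  -> 6.0
#   mean([])         -> raises ValueError("List is empty")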
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 244
| 0
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Locate the leftmost insertion point for item, keeping sorted order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Locate the rightmost insertion point for item, keeping sorted order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Insert item before any existing equal entries."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Insert item after any existing equal entries."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    """Binary search delegating to the standard library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    """Recursive binary search; returns the index of item or None."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
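# Worked examples (illustrative data): in the sorted list [0, 5, 7, 10, 15]
#   binary_search([0, 5, 7, 10, 15], 5)  -> 1
#   bisect_left([0, 5, 7, 10, 15], 7)    -> 2
#   bisect_right([0, 5, 7, 10, 15], 7)   -> 3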
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 319
|
'''simple docstring'''
from types import MethodType
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; outputs a bit tensor in {-1, +1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits in {-1, +1}; outputs an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
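# Minimal round-trip sketch (shapes and value are illustrative): a pixel value of
# 0.6 maps to the 8-bit integer 153 = 0b10011001, i.e. the +/-1 bit vector
# [1, -1, -1, 1, 1, -1, -1, 1]; bits_to_decimal inverts this back to 153 / 255 = 0.6.
def _bits_roundtrip_demo() -> None:
    x = torch.full((1, 3, 4, 4), 0.6)
    assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x, atol=1 / 255)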
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
):
    """DDIM step that clips the predicted x_0 to [-bit_scale, bit_scale] instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
):
    """DDPM step that clips the predicted x_0 to [-bit_scale, bit_scale] instead of [-1, 1]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Attach the bit-aware step function (and the clip scale it reads) to the
        # scheduler instance. MethodType binds the function so that `self` inside
        # it refers to the scheduler; this binding is our repair choice here.
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            scheduler,
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
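# Hypothetical usage sketch (UNet sizes are assumptions, not from the source):
#   unet = UNet2DConditionModel(sample_size=64, in_channels=3 * BITS, out_channels=3 * BITS)
#   pipe = BitDiffusion(unet, DDIMScheduler(num_train_timesteps=1000), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]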
| 22
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot classification: turns the label string into a list
    and builds one (sequence, hypothesis) pair per candidate label.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
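# Usage sketch via the pipeline factory (model choice is an assumption, not from the source):
#   from transformers import pipeline
#   clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   clf("I love this new phone", candidate_labels=["technology", "cooking"])
#   # -> {"sequence": ..., "labels": ["technology", "cooking"], "scores": [...]}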
| 68
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Compute H(z) = H0 * E(z) for an FLRW universe with the given relative densities."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
            hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
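# Sanity check: at redshift 0 the density terms sum to E(0) = 1 by construction
# (curvature absorbs the remainder), so the call above prints the input Hubble
# constant, 68.3.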
| 68
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
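# Usage sketch (file layout is an assumption): this builder is what backs
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})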
| 482
|
"""simple docstring"""
lowerCAmelCase__ ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 482
| 1
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation: a pixel turns on if the kernel overlaps any on-pixel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (slice reconstructed for an odd-sized kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
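# Worked example: dilating a single on-pixel with the cross-shaped kernel below
# turns it into a plus sign (the centre plus its four direct neighbours):
#   dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
#            np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))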
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 708
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
| 135
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # threshold the logits into a binary mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
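# Hypothetical usage sketch (image path and call form are assumptions):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")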
| 657
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 568
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 711
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on part of the dataset; each rank saves rank_<i>_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
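# Hypothetical launch sketch (paths and model are assumptions):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir gens --bs 16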
| 540
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 232
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
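# For the default input, 600851475143 = 71 * 839 * 1471 * 6857, so the largest
# prime factor printed below is 6857.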
if __name__ == "__main__":
print(F'''{solution() = }''')
| 232
| 1
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCAmelCase_ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
UpperCAmelCase_ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
UpperCAmelCase_ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 664
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    # Class, method, and exception names below are reconstructed from the mangled original.
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment, TensorFlow not -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
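# Priority pinned down by the tests above: an explicit framework argument always wins;
# otherwise a local checkpoint's framework is used; with no checkpoint the installed
# backend decides, preferring PyTorch when both are available. Hypothetical call:
#     FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER)  # -> "pt" here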
| 664
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Any = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
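# Hypothetical programmatic invocation mirroring the CLI defaults above (it downloads
# timm weights, so it is sketch-only and not part of the original script):
#     convert_weights_and_push(Path("levit-dump-folder/"), model_name="levit-128S", push_to_hub=False)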
| 682
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
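# The lazy-module pattern above defers the heavy torch/tf imports until an attribute
# is first touched. A minimal, self-contained sketch of the idea (hypothetical helper,
# not the transformers implementation):
import importlib
import types


class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        # Import the owning submodule only on first access to `item`.
        for submodule, names in self._import_structure.items():
            if item in names:
                module = importlib.import_module(self.__name__ + "." + submodule)
                return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")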
| 682
| 1
|
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of grid,
    moving up/down/left/right without revisiting a cell; 1 marks a blocked cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
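# Illustrative run (hypothetical maze; 1 marks a blocked cell): an open 2x2 grid has
# exactly two simple paths from the top-left to the bottom-right corner.
maze = [[0, 0], [0, 0]]
print(depth_first_search(maze, 0, 0, set()))  # 2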
| 707
|
class DisjointSet:
    def __init__(self, set_counts: list):
        """
        Initialize with a list of the number of items in each set
        and rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing src and dst; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the set representative, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
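# Hypothetical usage of the union-find above: three singleton sets, two merges.
ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
print(ds.max_set)  # 3: every element now lives in a single set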
| 277
| 0
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first n elements of collection in place, recursively."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble collection[index - 1] forward until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
__A : str = input("Enter integers separated by spaces: ")
__A : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
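# Quick illustrative check (hypothetical input): the sort works in place, e.g.
# data = [3, 1, 2]; rec_insertion_sort(data, len(data)) leaves data == [1, 2, 3].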
| 27
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    # Names below are reconstructed from the call sites in the test class.
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 244
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
'''simple docstring'''
UpperCamelCase : List[str] = "[PAD]"
UpperCamelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
    def test_get_vocab(self):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
@slow
    def test_hello_world(self):
        src_text = "Hello World!"
        expected_ids = [35389, 6672, 49, 2]
        self.assertListEqual(expected_ids, self.big_tokenizer.encode(src_text))
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
UpperCamelCase : List[Any] = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `UpperCamelCase` above is bound by the (very long) expected-encoding literal, kept verbatim
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 38
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    # Class and attribute names reconstructed from the processor/tokenizer classes declared below.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
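# Hypothetical usage of the processor above (model id is illustrative, not from the source):
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")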
| 38
| 1
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__magic_name__ = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory used by `datasets-cli` to instantiate the command."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
'''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset."
        )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert."
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 665
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
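# Illustrative round trip (hypothetical key and message): deciphering the enciphered
# text returns the upper-cased original, since the cipher map only covers A-Z.
cmap = create_cipher_map("Goodbye!!")
assert decipher(encipher("Hello World!!", cmap), cmap) == "HELLO WORLD!!"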
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 221
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=False, output_all_encodings=False, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu'), layer_norm_eps=predefined_args.get('layer_norm_eps', None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), 'models')
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(SCREAMING_SNAKE_CASE__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight'
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight'
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta'
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma'
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias'
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight'
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias'
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight'
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias'
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight'
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias'
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight'
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta'
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma'
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias'
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight'
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias'
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight'
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta'
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma'
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print('✔️ Both models output the same tensors')
    else:
        print('❌ The models do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 682
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
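# From the shell this module corresponds to `accelerate test [--config_file PATH]`,
# which builds the command above and launches the bundled test_script.py through
# accelerate-launch.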
| 682
| 1
|